Mirror of https://github.com/BurntSushi/ripgrep.git
Synced 2025-08-19 14:13:49 -07:00

Compare commits: 0.5.0 ... globset-0.
333 commits
(Commit table: 333 entries; only the abbreviated SHA1 column was captured, and the author, date, and message cells are empty.)
.gitignore (vendored): 9 lines changed

@@ -6,3 +6,12 @@ target
 /ignore/Cargo.lock
 /termcolor/Cargo.lock
 /wincolor/Cargo.lock
+/deployment
+
+# Snapcraft files
+stage
+prime
+parts
+*.snap
+*.pyc
+ripgrep*_source.tar.bz2
.travis.yml: 113 lines changed
(Side-by-side diff flattened by the mirror; removed and added lines appear interleaved, without +/- markers.)

@@ -1,73 +1,106 @@
language: rust

env:
global:
- PROJECT_NAME=ripgrep
- PROJECT_NAME: ripgrep
- RUST_BACKTRACE: full
addons:
apt:
packages:
# For generating man page.
- libxslt1-dev
- asciidoc
- docbook-xsl
- xsltproc
- libxml2-utils
# Needed for completion-function test.
- zsh
# Needed for testing decompression search.
- xz-utils
matrix:
fast_finish: true
include:
# Nightly channel.
# (All *nix releases are done on the nightly channel to take advantage
# of the regex library's multiple pattern SIMD search.)
# All *nix releases are done on the nightly channel to take advantage
# of the regex library's multiple pattern SIMD search.
- os: linux
rust: nightly-2017-03-13
rust: nightly
env: TARGET=i686-unknown-linux-musl
- os: linux
rust: nightly-2017-03-13
rust: nightly
env: TARGET=x86_64-unknown-linux-musl
- os: osx
rust: nightly-2017-03-13
env: TARGET=x86_64-apple-darwin
# Beta channel.
rust: nightly
# XML_CATALOG_FILES is apparently necessary for asciidoc on macOS.
env: TARGET=x86_64-apple-darwin XML_CATALOG_FILES=/usr/local/etc/xml/catalog
- os: linux
rust: nightly
env: TARGET=arm-unknown-linux-gnueabihf GCC_VERSION=4.8
addons:
apt:
packages:
- gcc-4.8-arm-linux-gnueabihf
- binutils-arm-linux-gnueabihf
- libc6-armhf-cross
- libc6-dev-armhf-cross
# For generating man page.
- libxslt1-dev
- asciidoc
- docbook-xsl
- xsltproc
- libxml2-utils
# Beta channel. We enable these to make sure there are no regressions in
# Rust beta releases.
- os: linux
rust: beta
env: TARGET=x86_64-unknown-linux-musl
- os: linux
rust: beta
env: TARGET=x86_64-unknown-linux-gnu
# Minimum Rust supported channel.
# Minimum Rust supported channel. We enable these to make sure ripgrep
# continues to work on the advertised minimum Rust version.
- os: linux
rust: 1.12.0
rust: 1.20.0
env: TARGET=x86_64-unknown-linux-gnu
- os: linux
rust: 1.12.0
rust: 1.20.0
env: TARGET=x86_64-unknown-linux-musl

before_install:
- export PATH="$PATH:$HOME/.cargo/bin"

install:
- bash ci/install.sh

script:
- bash ci/script.sh

before_deploy:
- bash ci/before_deploy.sh

- os: linux
rust: 1.20.0
env: TARGET=arm-unknown-linux-gnueabihf GCC_VERSION=4.8
addons:
apt:
packages:
- gcc-4.8-arm-linux-gnueabihf
- binutils-arm-linux-gnueabihf
- libc6-armhf-cross
- libc6-dev-armhf-cross
# For generating man page.
- libxslt1-dev
- asciidoc
- docbook-xsl
- xsltproc
- libxml2-utils
install: ci/install.sh
script: ci/script.sh
before_deploy: ci/before_deploy.sh
deploy:
provider: releases
file_glob: true
file: deployment/${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}.tar.gz
skip_cleanup: true
on:
condition: $TRAVIS_RUST_VERSION = nightly
branch: master
tags: true
api_key:
secure: "IbSnsbGkxSydR/sozOf1/SRvHplzwRUHzcTjM7BKnr7GccL86gRPUrsrvD103KjQUGWIc1TnK1YTq5M0Onswg/ORDjqa1JEJPkPdPnVh9ipbF7M2De/7IlB4X4qXLKoApn8+bx2x/mfYXu4G+G1/2QdbaKK2yfXZKyjz0YFx+6CNrVCT2Nk8q7aHvOOzAL58vsG8iPDpupuhxlMDDn/UhyOWVInmPPQ0iJR1ZUJN8xJwXvKvBbfp3AhaBiAzkhXHNLgBR8QC5noWWMXnuVDMY3k4f3ic0V+p/qGUCN/nhptuceLxKFicMCYObSZeUzE5RAI0/OBW7l3z2iCoc+TbAnn+JrX/ObJCfzgAOXAU3tLaBFMiqQPGFKjKg1ltSYXomOFP/F7zALjpvFp4lYTBajRR+O3dqaxA9UQuRjw27vOeUpMcga4ZzL4VXFHzrxZKBHN//XIGjYAVhJ1NSSeGpeJV5/+jYzzWKfwSagRxQyVCzMooYFFXzn8Yxdm3PJlmp3GaAogNkdB9qKcrEvRINCelalzALPi0hD/HUDi8DD2PNTCLLMo6VSYtvc685Zbe+KgNzDV1YyTrRCUW6JotrS0r2ULLwnsh40hSB//nNv3XmwNmC/CmW5QAnIGj8cBMF4S2t6ohADIndojdAfNiptmaZOIT6owK7bWMgPMyopo="
file_glob: true
file: ${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}.*
# don't delete the artifacts from previous phases
skip_cleanup: true
# deploy when a new tag is pushed
on:
# channel to use to produce the release artifacts
# NOTE make sure you only release *once* per target
# TODO you may want to pick a different channel
condition: $TRAVIS_RUST_VERSION = nightly-2017-03-13
tags: true

branches:
only:
# Pushes and PR to the master branch
- master
# IMPORTANT Ruby regex to match tags. Required, or travis won't trigger deploys when a new tag
# is pushed. This regex matches semantic versions like v1.2.3-rc4+2016.02.22
# Ruby regex to match tags. Required, or travis won't trigger deploys when
# a new tag is pushed.
- /^\d+\.\d+\.\d+.*$/

notifications:
email:
on_success: never
CHANGELOG.md: 377 lines changed

@@ -1,3 +1,380 @@
0.9.0 (TBD)
===========
This is a new minor version release of ripgrep that mostly contains bug fixes.

Releases provided on Github for `x86` and `x86_64` will now work on all target
CPUs, and will also automatically take advantage of features found on modern
CPUs (such as AVX2) for additional optimizations.

**BREAKING CHANGES**:

* When `--count` and `--only-matching` are provided simultaneously, the
  behavior of ripgrep is as if the `--count-matches` flag was given. That is,
  the total number of matches is reported, where there may be multiple matches
  per line. Previously, the behavior of ripgrep was to report the total number
  of matching lines. (Note that this behavior diverges from the behavior of
  GNU grep.)
* Octal syntax is no longer supported. ripgrep previously accepted expressions
  like `\1` as syntax for matching `U+0001`, but ripgrep will now report an
  error instead.
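To make the `--count`/`--only-matching` change above concrete, here is a small illustrative shell session; the file name and contents are hypothetical, but the flag semantics follow the notes in this changelog.

```sh
# example.txt contains one line with two occurrences of "foo".
$ printf 'foo foo\nbar\n' > example.txt

$ rg --count foo example.txt                   # number of matching lines
1
$ rg --count-matches foo example.txt           # total number of matches (new in 0.9.0)
2
$ rg --count --only-matching foo example.txt   # as of 0.9.0, same as --count-matches
2
```
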
Feature enhancements:

* [FEATURE #411](https://github.com/BurntSushi/ripgrep/issues/411):
  Add a `--stats` flag, which emits aggregate statistics after search results.
* [FEATURE #702](https://github.com/BurntSushi/ripgrep/issues/702):
  Support `\u{..}` Unicode escape sequences.
* [FEATURE #812](https://github.com/BurntSushi/ripgrep/issues/812):
  Add `-b/--byte-offset` flag that reports byte offset of each matching line.
* [FEATURE #814](https://github.com/BurntSushi/ripgrep/issues/814):
  Add `--count-matches` flag, which is like `--count`, but for each match.
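A brief sketch of how the 0.9.0-era flags listed above might be used; the patterns and paths are made up for illustration.

```sh
# \u{..} escapes let you write a Unicode code point directly in the pattern.
$ rg '\u{00E9}' notes.txt        # matches the letter "é"

# --stats appends aggregate statistics after the usual results;
# -b/--byte-offset prefixes each matching line with its byte offset.
$ rg -b --stats 'fn main' src/
```
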
Bug fixes:

* [BUG #135](https://github.com/BurntSushi/ripgrep/issues/135):
  Release portable binaries that conditionally use SSSE3, AVX2, etc., at
  runtime.
* [BUG #268](https://github.com/BurntSushi/ripgrep/issues/268):
  Print descriptive error message when trying to use look-around or
  backreferences.
* [BUG #395](https://github.com/BurntSushi/ripgrep/issues/395):
  Show comprehensible error messages for regexes like `\s*{`.
* [BUG #526](https://github.com/BurntSushi/ripgrep/issues/526):
  Support backslash escapes in globs.
* [BUG #832](https://github.com/BurntSushi/ripgrep/issues/832):
  Clarify usage instructions for `-f/--file` flag.
* [BUG #851](https://github.com/BurntSushi/ripgrep/issues/851):
  Fix `-S/--smart-case` detection once and for all.
* [BUG #852](https://github.com/BurntSushi/ripgrep/issues/852):
  Be robust with respect to `ENOMEM` errors returned by `mmap`.
* [BUG #853](https://github.com/BurntSushi/ripgrep/issues/853):
  Upgrade `grep` crate to `regex-syntax 0.5.0`.

0.8.1 (2018-02-20)
==================
This is a patch release of ripgrep that primarily fixes regressions introduced
in 0.8.0 (#820 and #824) in directory traversal on Windows. These regressions
do not impact non-Windows users.

Feature enhancements:

* Added or improved file type filtering for csv and VHDL.
* [FEATURE #798](https://github.com/BurntSushi/ripgrep/issues/798):
  Add `underline` support to `termcolor` and ripgrep. See documentation on the
  `--colors` flag for details.

Bug fixes:

* [BUG #684](https://github.com/BurntSushi/ripgrep/issues/684):
  Improve documentation for the `--ignore-file` flag.
* [BUG #789](https://github.com/BurntSushi/ripgrep/issues/789):
  Don't show `(rev )` if the revision wasn't available during the build.
* [BUG #791](https://github.com/BurntSushi/ripgrep/issues/791):
  Add man page to ARM release.
* [BUG #797](https://github.com/BurntSushi/ripgrep/issues/797):
  Improve documentation for "intense" setting in `termcolor`.
* [BUG #800](https://github.com/BurntSushi/ripgrep/issues/800):
  Fix a bug in the `ignore` crate for custom ignore files. This had no impact
  on ripgrep.
* [BUG #807](https://github.com/BurntSushi/ripgrep/issues/807):
  Fix a bug where `rg --hidden .` behaved differently from `rg --hidden ./`.
* [BUG #815](https://github.com/BurntSushi/ripgrep/issues/815):
  Clarify a common failure mode in user guide.
* [BUG #820](https://github.com/BurntSushi/ripgrep/issues/820):
  Fixes a bug on Windows where symlinks were followed even if not requested.
* [BUG #824](https://github.com/BurntSushi/ripgrep/issues/824):
  Fix a performance regression in directory traversal on Windows.

0.8.0 (2018-02-11)
==================
This is a new minor version release of ripgrep that satisfies several popular
feature requests (config files, search compressed files, true colors), fixes
many bugs and improves the quality of life for ripgrep maintainers. This
release also includes greatly improved documentation in the form of a
[User Guide](GUIDE.md) and a [FAQ](FAQ.md).

This release increases the **minimum supported Rust version** from 1.17 to
1.20.

**BREAKING CHANGES**:

Note that these are all very minor and unlikely to impact most users.

* In order to support configuration files, flag overrides needed to be
  rethought. In some cases, this changed ripgrep's behavior. For example,
  in ripgrep 0.7.1, `rg foo -s -i` will perform a case sensitive search
  since the `-s/--case-sensitive` flag was defined to always take precedence
  over the `-i/--ignore-case` flag, regardless of position. In ripgrep 0.8.0
  however, the override rule for all flags has changed to "the most recent
  flag wins among competing flags." That is, `rg foo -s -i` now performs a
  case insensitive search.
* The `-M/--max-columns` flag was tweaked so that specifying a value of `0`
  now makes ripgrep behave as if the flag was absent. This makes it possible
  to set a default value in a configuration file and then override it. The
  previous ripgrep behavior was to suppress all matching non-empty lines.
* In all globs, `[^...]` is now equivalent to `[!...]` (indicating class
  negation). Previously, `^` had no special significance in a character class.
* For **downstream packagers**, the directory hierarchy in ripgrep's archive
  releases has changed. The root directory now only contains the executable,
  README and license. There is now a new directory called `doc` which contains
  the man page (previously in the root), a user guide (new), a FAQ (new) and
  the CHANGELOG (previously not included in release). The `complete`
  directory remains the same.
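A short, hypothetical shell illustration of the first and third breaking changes above (the flag-override rule and `[^...]` glob negation):

```sh
# "Most recent flag wins": -i comes after -s, so the search is case insensitive.
$ rg foo -s -i

# In globs, [^...] now negates a character class, exactly like [!...]:
# only search Rust files whose names do not start with "t".
$ rg foo -g '[!t]*.rs'
$ rg foo -g '[^t]*.rs'    # equivalent as of 0.8.0
```
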
Feature enhancements:

* Added or improved file type filtering for
  Apache Avro, C++, GN, Google Closure Templates, Jupyter notebooks, man pages,
  Protocol Buffers, Smarty and Web IDL.
* [FEATURE #196](https://github.com/BurntSushi/ripgrep/issues/196):
  Support a configuration file. See
  [the new user guide](GUIDE.md#configuration-file)
  for details.
* [FEATURE #261](https://github.com/BurntSushi/ripgrep/issues/261):
  Add extended or "true" color support. Works in Windows 10!
  [See the FAQ for details.](FAQ.md#colors)
* [FEATURE #539](https://github.com/BurntSushi/ripgrep/issues/539):
  Search gzip, bzip2, lzma or xz files when given `-z/--search-zip` flag.
* [FEATURE #544](https://github.com/BurntSushi/ripgrep/issues/544):
  Add support for line number alignment via a new `--line-number-width` flag.
* [FEATURE #654](https://github.com/BurntSushi/ripgrep/pull/654):
  Support linuxbrew in ripgrep's Brew tap.
* [FEATURE #673](https://github.com/BurntSushi/ripgrep/issues/673):
  Bring back `.rgignore` files. (A higher precedent, application specific
  version of `.ignore`.)
* [FEATURE #676](https://github.com/BurntSushi/ripgrep/issues/676):
  Provide ARM binaries. **WARNING:** This will be provided on a best effort
  basis.
* [FEATURE #709](https://github.com/BurntSushi/ripgrep/issues/709):
  Suggest `-F/--fixed-strings` flag on a regex syntax error.
* [FEATURE #740](https://github.com/BurntSushi/ripgrep/issues/740):
  Add a `--passthru` flag that causes ripgrep to print every line it reads.
* [FEATURE #785](https://github.com/BurntSushi/ripgrep/pull/785):
  Overhaul documentation. Cleaned up README, added user guide and FAQ.
* [FEATURE 7f5c07](https://github.com/BurntSushi/ripgrep/commit/7f5c07434be92103b5bf7e216b9c7494aed2d8cb):
  Add hidden flags for convenient overrides (e.g., `--no-text`).
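As a sketch of the configuration-file feature (#196) and the `-z/--search-zip` flag (#539) listed above: ripgrep reads one flag per line from the file named by the `RIPGREP_CONFIG_PATH` environment variable. The file path, flag values and log file below are only examples.

```sh
$ cat ~/.ripgreprc
# One flag (with its value) per line; comment lines start with "#".
--max-columns=150
--glob=!.git/*
--smart-case

$ RIPGREP_CONFIG_PATH=~/.ripgreprc rg 'fn main'

# -z/--search-zip also searches compressed files (gzip, bzip2, lzma, xz).
$ rg -z 'panic' logs/app.log.gz
```
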
Bug fixes:

* [BUG #553](https://github.com/BurntSushi/ripgrep/issues/553):
  Permit flags to be repeated.
* [BUG #633](https://github.com/BurntSushi/ripgrep/issues/633):
  Fix a bug where ripgrep would panic on Windows while following symlinks.
* [BUG #649](https://github.com/BurntSushi/ripgrep/issues/649):
  Fix handling of `!**/` in `.gitignore`.
* [BUG #663](https://github.com/BurntSushi/ripgrep/issues/663):
  **BREAKING CHANGE:** Support `[^...]` glob syntax (as identical to `[!...]`).
* [BUG #693](https://github.com/BurntSushi/ripgrep/issues/693):
  Don't display context separators when not printing matches.
* [BUG #705](https://github.com/BurntSushi/ripgrep/issues/705):
  Fix a bug that prevented ripgrep from searching OneDrive directories.
* [BUG #717](https://github.com/BurntSushi/ripgrep/issues/717):
  Improve `--smart-case` uppercase character detection.
* [BUG #725](https://github.com/BurntSushi/ripgrep/issues/725):
  Clarify that globs do not override explicitly given paths to search.
* [BUG #742](https://github.com/BurntSushi/ripgrep/pull/742):
  Write ANSI reset code as `\x1B[0m` instead of `\x1B[m`.
* [BUG #747](https://github.com/BurntSushi/ripgrep/issues/747):
  Remove `yarn.lock` from YAML file type.
* [BUG #760](https://github.com/BurntSushi/ripgrep/issues/760):
  ripgrep can now search `/sys/devices/system/cpu/vulnerabilities/*` files.
* [BUG #761](https://github.com/BurntSushi/ripgrep/issues/761):
  Fix handling of gitignore patterns that contain a `/`.
* [BUG #776](https://github.com/BurntSushi/ripgrep/pull/776):
  **BREAKING CHANGE:** `--max-columns=0` now disables the limit.
* [BUG #779](https://github.com/BurntSushi/ripgrep/issues/779):
  Clarify documentation for `--files-without-match`.
* [BUG #780](https://github.com/BurntSushi/ripgrep/issues/780),
  [BUG #781](https://github.com/BurntSushi/ripgrep/issues/781):
  Fix bug where ripgrep missed some matching lines.

Maintenance fixes:

* [MAINT #772](https://github.com/BurntSushi/ripgrep/pull/772):
  Drop `env_logger` in favor of simpler logger to avoid many new dependencies.
* [MAINT #772](https://github.com/BurntSushi/ripgrep/pull/772):
  Add git revision hash to ripgrep's version string.
* [MAINT #772](https://github.com/BurntSushi/ripgrep/pull/772):
  (Seemingly) improve compile times.
* [MAINT #776](https://github.com/BurntSushi/ripgrep/pull/776):
  Automatically generate man page during build.
* [MAINT #786](https://github.com/BurntSushi/ripgrep/pull/786):
  Remove use of `unsafe` in `globset`. :tada:
* [MAINT e9d448](https://github.com/BurntSushi/ripgrep/commit/e9d448e93bb4e1fb3b0c1afc29adb5af6ed5283d):
  Add an issue template (has already drastically improved bug reports).
* [MAINT ae2d03](https://github.com/BurntSushi/ripgrep/commit/ae2d036dd4ba2a46acac9c2d77c32e7c667eb850):
  Remove the `compile` script.

Friends of ripgrep:

I'd like to extend my gratitude to
[@balajisivaraman](https://github.com/balajisivaraman)
for their recent hard work in a number of areas, and in particular, for
implementing the "search compressed files" feature. Their work in sketching out
a specification for that and other work has been exemplary.

Thanks
[@balajisivaraman](https://github.com/balajisivaraman)!


0.7.1 (2017-10-22)
==================
This is a patch release of ripgrep that includes a fix to a very bad regression
introduced in ripgrep 0.7.0.

Bug fixes:

* [BUG #648](https://github.com/BurntSushi/ripgrep/issues/648):
  Fix a bug where it was very easy to exceed standard file descriptor limits.


0.7.0 (2017-10-20)
==================
This is a new minor version release of ripgrep that includes mostly bug fixes.

ripgrep continues to require Rust 1.17, and there are no known breaking changes
introduced in this release.

Feature enhancements:

* Added or improved file type filtering for config & license files, Elm,
  Purescript, Standard ML, sh, systemd, Terraform.
* [FEATURE #593](https://github.com/BurntSushi/ripgrep/pull/593):
  Using both `-o/--only-matching` and `-r/--replace` does the right thing.
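A small, hypothetical illustration of combining `-o/--only-matching` with `-r/--replace`, which works as expected as of 0.7.0:

```sh
# Print only the captured crate name from each "use" line, not the whole line.
$ rg -o -r '$1' '^use ([a-z_]+)' src/main.rs
```
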
Bug fixes:

* [BUG #200](https://github.com/BurntSushi/ripgrep/issues/200):
  ripgrep will stop when its pipe is closed.
* [BUG #402](https://github.com/BurntSushi/ripgrep/issues/402):
  Fix context printing bug when the `-m/--max-count` flag is used.
* [BUG #521](https://github.com/BurntSushi/ripgrep/issues/521):
  Fix interaction between `-r/--replace` and terminal colors.
* [BUG #559](https://github.com/BurntSushi/ripgrep/issues/559):
  Ignore test that tried reading a non-UTF-8 file path on macOS.
* [BUG #599](https://github.com/BurntSushi/ripgrep/issues/599):
  Fix color escapes on empty matches.
* [BUG #600](https://github.com/BurntSushi/ripgrep/issues/600):
  Avoid expensive (on Windows) file handle check when using --files.
* [BUG #618](https://github.com/BurntSushi/ripgrep/issues/618):
  Clarify installation instructions for Ubuntu users.
* [BUG #633](https://github.com/BurntSushi/ripgrep/issues/633):
  Faster symlink loop checking on Windows.

0.6.0 (2017-08-23)
==================
This is a new minor version release of ripgrep that includes many bug fixes
and a few new features such as `--iglob` and `-x/--line-regexp`.

Note that this release increases the minimum supported Rust version from 1.12
to 1.17.

Feature enhancements:

* Added or improved file type filtering for BitBake, C++, Cabal, cshtml, Julia,
  Make, msbuild, QMake, Yocto
* [FEATURE #163](https://github.com/BurntSushi/ripgrep/issues/163):
  Add an `--iglob` flag that is like `-g/--glob`, but matches globs
  case insensitively.
* [FEATURE #520](https://github.com/BurntSushi/ripgrep/pull/518):
  Add `-x/--line-regexp` flag, which requires a match to span an entire line.
* [FEATURE #551](https://github.com/BurntSushi/ripgrep/pull/551),
  [FEATURE #554](https://github.com/BurntSushi/ripgrep/pull/554):
  `ignore`: add new `matched_path_or_any_parents` method.
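A quick, hypothetical illustration of the two flags introduced above; the patterns and paths are placeholders.

```sh
# --iglob matches globs case insensitively: README.md, readme.MD, ...
$ rg --iglob '*.md' 'installation'

# -x/--line-regexp only reports matches that span an entire line.
$ rg -x 'use std::io;' src/
```
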
Bug fixes:

* [BUG #342](https://github.com/BurntSushi/ripgrep/issues/342):
  Fix invisible text in some PowerShell environments by changing the
  default color scheme on Windows.
* [BUG #413](https://github.com/BurntSushi/ripgrep/issues/413):
  Release binaries on Unix are now `strip`'d by default. This decreases
  binary size by an order of magnitude.
* [BUG #483](https://github.com/BurntSushi/ripgrep/issues/483):
  When `--quiet` is passed, `--files` should be quiet.
* [BUG #488](https://github.com/BurntSushi/ripgrep/pull/488):
  When `--vimgrep` is passed, `--with-filename` should be enabled
  automatically.
* [BUG #493](https://github.com/BurntSushi/ripgrep/issues/493):
  Fix another bug in the implementation of the `-o/--only-matching`
  flag.
* [BUG #499](https://github.com/BurntSushi/ripgrep/pull/499):
  Permit certain flags to override others.
* [BUG #523](https://github.com/BurntSushi/ripgrep/pull/523):
  `wincolor`: Re-fetch Windows console on all calls.
* [BUG #523](https://github.com/BurntSushi/ripgrep/issues/524):
  `--version` now shows enabled compile-time features.
* [BUG #532](https://github.com/BurntSushi/ripgrep/issues/532),
  [BUG #536](https://github.com/BurntSushi/ripgrep/pull/536),
  [BUG #538](https://github.com/BurntSushi/ripgrep/pull/538),
  [BUG #540](https://github.com/BurntSushi/ripgrep/pull/540),
  [BUG #560](https://github.com/BurntSushi/ripgrep/pull/560),
  [BUG #565](https://github.com/BurntSushi/ripgrep/pull/565):
  Improve zsh completion.
* [BUG #578](https://github.com/BurntSushi/ripgrep/pull/578):
  Enable SIMD for `encoding_rs` when appropriate.
* [BUG #580](https://github.com/BurntSushi/ripgrep/issues/580):
  Fix `-w/--word-regexp` in the presence of capturing groups.
* [BUG #581](https://github.com/BurntSushi/ripgrep/issues/581):
  Document that ripgrep may terminate unexpectedly when searching via
  memory maps (which can happen using default settings).

Friends of ripgrep:

I'd like to give a big Thank You to @okdana for their recent hard work on
ripgrep. This includes new features like `--line-regexp`, heroic effort on
zsh auto-completion and thinking through some thorny argv issues with me.

I'd also like to thank @ericbn for their work on improving ripgrep's argv
parsing by allowing some flags to override others.

Thanks @okdana and @ericbn!


0.5.2 (2017-05-11)
==================
Feature enhancements:

* Added or improved file type filtering for Nix.
* [FEATURE #362](https://github.com/BurntSushi/ripgrep/issues/362):
  Add `--regex-size-limit` and `--dfa-size-limit` flags.
* [FEATURE #444](https://github.com/BurntSushi/ripgrep/issues/444):
  Improve error messages for invalid globs.

Bug fixes:

* [BUG #442](https://github.com/BurntSushi/ripgrep/issues/442):
  Fix line wrapping in `--help` output.
* [BUG #451](https://github.com/BurntSushi/ripgrep/issues/451):
  Fix bug with duplicate output when using `-o/--only-matching` flag.


0.5.1 (2017-04-09)
==================
Feature enhancements:

* Added or improved file type filtering for vim.
* [FEATURE #34](https://github.com/BurntSushi/ripgrep/issues/34):
  Add a `-o/--only-matching` flag.
* [FEATURE #377](https://github.com/BurntSushi/ripgrep/issues/377):
  Column numbers can now be customized with a color. (The default is
  no color.)
* [FEATURE #419](https://github.com/BurntSushi/ripgrep/issues/419):
  Added `-0` short flag option for `--null`.

Bug fixes:

* [BUG #381](https://github.com/BurntSushi/ripgrep/issues/381):
  Include license text in all subcrates.
* [BUG #418](https://github.com/BurntSushi/ripgrep/issues/418),
  [BUG #426](https://github.com/BurntSushi/ripgrep/issues/426),
  [BUG #439](https://github.com/BurntSushi/ripgrep/issues/439):
  Fix a few bugs with `-h/--help` output.


0.5.0 (2017-03-12)
==================
This is a new minor version release of ripgrep that includes one minor breaking
Cargo.lock (generated): 399 lines changed

The generated lockfile diff records the dependency upgrades behind this release (per-package dependency lists and `[metadata]` checksums omitted here). The old `[root]` table for `ripgrep 0.4.0` becomes a regular `[[package]]` entry for `ripgrep 0.8.1`. Version changes visible in the diff:

| Package | Old | New |
|---|---|---|
| ripgrep | 0.4.0 | 0.8.1 |
| aho-corasick | 0.6.2 | 0.6.4 |
| ansi_term | 0.9.0 | 0.11.0 |
| atty | 0.2.2 | 0.2.6 |
| bitflags | 0.8.0 | 1.0.1 |
| bytecount | 0.1.6 | 0.3.1 |
| cfg-if | 0.1.0 | 0.1.2 |
| clap | 2.21.1 | 2.31.2 |
| crossbeam | 0.2.10 | 0.3.2 |
| encoding_rs | 0.5.0 | 0.7.2 |
| fnv | 1.0.5 | 1.0.6 |
| globset | 0.1.4 | 0.4.0 |
| grep | 0.1.6 | 0.1.8 |
| ignore | 0.1.8 | 0.4.1 |
| lazy_static | 0.2.4 | 1.0.0 |
| libc | 0.2.21 | 0.2.40 |
| log | 0.3.7 | 0.4.1 |
| memchr | 1.0.1 | 2.0.1 |
| memmap | 0.5.2 | 0.6.2 |
| num_cpus | 1.3.0 | 1.8.0 |
| regex | 0.2.1 | 0.2.10 |
| regex-syntax | 0.4.0 | 0.5.5 |
| same-file | 0.1.3 | 1.0.2 |
| simd | 0.1.1 | 0.2.1 |
| strsim | 0.6.0 | 0.7.0 |
| termcolor | 0.3.1 | 0.3.6 |
| thread_local | 0.3.3 | 0.3.5 |
| unreachable | 0.1.1 | 1.0.0 |
| walkdir | 1.0.7 | 2.1.4 |
| winapi | 0.2.8 | 0.3.4 |
| wincolor | 0.1.3 | 0.1.6 |

Dependencies added: fuchsia-zircon 0.3.3, fuchsia-zircon-sys 0.3.3, glob 0.2.11, rand 0.4.2, redox_syscall 0.1.37, redox_termios 0.1.1, remove_dir_all 0.5.0, tempdir 0.3.7, termion 1.5.1, textwrap 0.9.0, ucd-util 0.1.1, winapi-i686-pc-windows-gnu 0.4.0, winapi-x86_64-pc-windows-gnu 0.4.0.

Dependencies removed: env_logger 0.4.2, fs2 0.4.1, kernel32-sys 0.2.2, term_size 0.2.3, thread-id 3.0.0, unicode-segmentation 1.1.0, winapi-build 0.1.1.

The visible portion of the diff ends partway through the `[metadata]` checksum section.
|
||||
"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb"
|
||||
"checksum lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c8f31047daa365f19be14b47c29df4f7c3b581832407daabe6ae77397619237d"
|
||||
"checksum libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)" = "6fd41f331ac7c5b8ac259b8bf82c75c0fb2e469bbf37d2becbba9a6a2221965b"
|
||||
"checksum log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "89f010e843f2b1a31dbd316b3b8d443758bc634bed37aabade59c686d644e0a2"
|
||||
"checksum memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "796fba70e76612589ed2ce7f45282f5af869e0fdd7cc6199fa1aa1f1d591ba9d"
|
||||
"checksum memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e2ffa2c986de11a9df78620c01eeaaf27d94d3ff02bf81bfcca953102dd0c6ff"
|
||||
"checksum num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a3322e4bca9d212ad9a158a02abc6934d005490c054a2778df73a70aa0a30"
|
||||
"checksum rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "eba5f8cb59cc50ed56be8880a5c7b496bfd9bd26394e176bc67884094145c2c5"
|
||||
"checksum redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "0d92eecebad22b767915e4d529f89f28ee96dbbf5a4810d2b844373f136417fd"
|
||||
"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76"
|
||||
"checksum regex 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "aec3f58d903a7d2a9dc2bf0e41a746f4530e0cab6b615494e058f67a3ef947fb"
|
||||
"checksum regex-syntax 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "bd90079345f4a4c3409214734ae220fd773c6f2e8a543d07370c6c1c369cfbfb"
|
||||
"checksum remove_dir_all 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dfc5b3ce5d5ea144bb04ebd093a9e14e9765bcfec866aecda9b6dec43b3d1e24"
|
||||
"checksum same-file 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cfb6eded0b06a0b512c8ddbcf04089138c9b4362c2f696f3c3d76039d68f3637"
|
||||
"checksum simd 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3dd0805c7363ab51a829a1511ad24b6ed0349feaa756c4bc2f977f9f496e6673"
|
||||
"checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550"
|
||||
"checksum tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8"
|
||||
"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096"
|
||||
"checksum textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c0b59b6b4b44d867f1370ef1bd91bfb262bf07bf0ae65c202ea2fbc16153b693"
|
||||
"checksum thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "279ef31c19ededf577bfd12dfae728040a21f635b06a24cd670ff510edd38963"
|
||||
"checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d"
|
||||
"checksum unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bf3a113775714a22dcb774d8ea3655c53a32debae63a063acc00a91cc586245f"
|
||||
"checksum unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f2ae5ddb18e1c92664717616dd9549dde73f539f01bd7b77c2edb2446bdff91"
|
||||
"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56"
|
||||
"checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122"
|
||||
"checksum vec_map 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8cdc8b93bd0198ed872357fb2e667f7125646b1762f16d60b2c96350d361897"
|
||||
"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
|
||||
"checksum walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "bb08f9e670fab86099470b97cd2b252d6527f0b3cc1401acdb595ffc9dd288ff"
|
||||
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
|
||||
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
|
||||
"checksum walkdir 2.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "63636bd0eb3d00ccb8b9036381b526efac53caf112b7783b730ab3f8e44da369"
|
||||
"checksum winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "04e3bd221fcbe8a271359c04f21a76db7d0c6028862d1bb5512d85e1e2eb5bb3"
|
||||
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
62 Cargo.toml
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "ripgrep"
|
||||
version = "0.5.0" #:version
|
||||
version = "0.8.1" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
Line oriented search tool using Rust's regex library. Combines the raw
|
||||
@@ -16,6 +16,10 @@ license = "Unlicense/MIT"
|
||||
exclude = ["HomebrewFormula"]
|
||||
build = "build.rs"
|
||||
|
||||
[badges]
|
||||
travis-ci = { repository = "BurntSushi/ripgrep" }
|
||||
appveyor = { repository = "BurntSushi/ripgrep" }
|
||||
|
||||
[[bin]]
|
||||
bench = false
|
||||
path = "src/main.rs"
|
||||
@@ -25,31 +29,51 @@ name = "rg"
|
||||
name = "integration"
|
||||
path = "tests/tests.rs"
|
||||
|
||||
[workspace]
|
||||
members = ["grep", "globset", "ignore", "termcolor", "wincolor"]
|
||||
|
||||
[dependencies]
|
||||
atty = "0.2.2"
|
||||
bytecount = "0.1.4"
|
||||
clap = "2.20.5"
|
||||
encoding_rs = "0.5.0"
|
||||
env_logger = { version = "0.4", default-features = false }
|
||||
grep = { version = "0.1.5", path = "grep" }
|
||||
ignore = { version = "0.1.7", path = "ignore" }
|
||||
lazy_static = "0.2"
|
||||
atty = "=0.2.6"
|
||||
bytecount = "0.3.1"
|
||||
encoding_rs = "0.7"
|
||||
globset = { version = "0.4.0", path = "globset" }
|
||||
grep = { version = "0.1.8", path = "grep" }
|
||||
ignore = { version = "0.4.0", path = "ignore" }
|
||||
lazy_static = "1"
|
||||
libc = "0.2"
|
||||
log = "0.3"
|
||||
memchr = "1"
|
||||
memmap = "0.5"
|
||||
log = "0.4"
|
||||
memchr = "2"
|
||||
memmap = "0.6"
|
||||
num_cpus = "1"
|
||||
regex = "0.2.1"
|
||||
same-file = "0.1.1"
|
||||
termcolor = { version = "0.3.0", path = "termcolor" }
|
||||
regex = "0.2.10"
|
||||
same-file = "1"
|
||||
termcolor = { version = "0.3.4", path = "termcolor" }
|
||||
|
||||
[dependencies.clap]
|
||||
version = "2.29.4"
|
||||
default-features = false
|
||||
features = ["suggestions", "color"]
|
||||
|
||||
[target.'cfg(windows)'.dependencies.winapi]
|
||||
version = "0.3"
|
||||
features = ["std", "winnt"]
|
||||
|
||||
[build-dependencies]
|
||||
clap = "2.18"
|
||||
lazy_static = "0.2"
|
||||
lazy_static = "1"
|
||||
|
||||
[build-dependencies.clap]
|
||||
version = "2.29.4"
|
||||
default-features = false
|
||||
features = ["suggestions", "color"]
|
||||
|
||||
[features]
|
||||
avx-accel = ["bytecount/avx-accel"]
|
||||
simd-accel = ["bytecount/simd-accel", "regex/simd-accel"]
|
||||
avx-accel = ["bytecount/avx-accel", "regex/unstable"]
|
||||
simd-accel = [
|
||||
"bytecount/simd-accel",
|
||||
"encoding_rs/simd-accel",
|
||||
"regex/unstable",
|
||||
]
|
||||
unstable = ["regex/unstable"]
|
||||
|
||||
[profile.release]
|
||||
debug = true
|
||||
|
||||
599 FAQ.md Normal file
@@ -0,0 +1,599 @@
|
||||
## FAQ
|
||||
|
||||
* [Does ripgrep support configuration files?](#config)
|
||||
* [What's changed in ripgrep recently?](#changelog)
|
||||
* [When is the next release?](#release)
|
||||
* [Does ripgrep have a man page?](#manpage)
|
||||
* [Does ripgrep have support for shell auto-completion?](#complete)
|
||||
* [How do I use lookaround and/or backreferences?](#fancy)
|
||||
* [How do I configure ripgrep's colors?](#colors)
|
||||
* [How do I enable true colors on Windows?](#truecolors-windows)
|
||||
* [How do I stop ripgrep from messing up colors when I kill it?](#stop-ripgrep)
|
||||
* [How can I get results in a consistent order?](#order)
|
||||
* [How do I search files that aren't UTF-8?](#encoding)
|
||||
* [How do I search compressed files?](#compressed)
|
||||
* [How do I search over multiple lines?](#multiline)
|
||||
* [How do I get around the regex size limit?](#size-limit)
|
||||
* [How do I make the `-f/--file` flag faster?](#dfa-size)
|
||||
* [How do I make the output look like The Silver Searcher's output?](#silver-searcher-output)
|
||||
* [When I run `rg`, why does it execute some other command?](#rg-other-cmd)
|
||||
* [How do I create an alias for ripgrep on Windows?](#rg-alias-windows)
|
||||
* [How do I create a PowerShell profile?](#powershell-profile)
|
||||
* [How do I pipe non-ASCII content to ripgrep on Windows?](#pipe-non-ascii-windows)
|
||||
* [How is ripgrep licensed?](#license)
|
||||
* [Can ripgrep replace grep?](#posix4ever)
|
||||
* [What does the "rip" in ripgrep mean?](#intentcountsforsomething)
|
||||
|
||||
|
||||
<h3 name="config">
|
||||
Does ripgrep support configuration files?
|
||||
</h3>
|
||||
|
||||
Yes. See the
|
||||
[guide's section on configuration files](GUIDE.md#configuration-file).
|
||||
|
||||
|
||||
<h3 name="changelog">
|
||||
What's changed in ripgrep recently?
|
||||
</h3>
|
||||
|
||||
Please consult ripgrep's [CHANGELOG](CHANGELOG.md).
|
||||
|
||||
|
||||
<h3 name="release">
|
||||
When is the next release?
|
||||
</h3>
|
||||
|
||||
ripgrep is a project whose contributors are volunteers. A release schedule
|
||||
adds undue stress to said volunteers. Therefore, releases are made on a best
|
||||
effort basis and no dates **will ever be given**.
|
||||
|
||||
One exception to this is high impact bugs. If a ripgrep release contains a
|
||||
significant regression, then there will generally be a strong push to get a
|
||||
patch release out with a fix.
|
||||
|
||||
|
||||
<h3 name="manpage">
|
||||
Does ripgrep have a man page?
|
||||
</h3>
|
||||
|
||||
Yes! Whenever ripgrep is compiled on a system with `asciidoc` present, then a
|
||||
man page is generated from ripgrep's argv parser. After compiling ripgrep, you
|
||||
can find the man page like so from the root of the repository:
|
||||
|
||||
```
|
||||
$ find ./target -name rg.1 -print0 | xargs -0 ls -t | head -n1
|
||||
./target/debug/build/ripgrep-79899d0edd4129ca/out/rg.1
|
||||
```
|
||||
|
||||
Running `man -l ./target/debug/build/ripgrep-79899d0edd4129ca/out/rg.1` will
|
||||
show the man page in your normal pager.
|
||||
|
||||
Note that the man page's documentation for options is equivalent to the output
|
||||
shown in `rg --help`. To see more condensed documentation (one line per flag),
|
||||
run `rg -h`.
|
||||
|
||||
The man page is also included in all
|
||||
[ripgrep binary releases](https://github.com/BurntSushi/ripgrep/releases).
|
||||
|
||||
|
||||
<h3 name="complete">
|
||||
Does ripgrep have support for shell auto-completion?
|
||||
</h3>
|
||||
|
||||
Yes! Shell completions can be found in the
|
||||
[same directory as the man page](#manpage)
|
||||
after building ripgrep. Zsh completions are maintained separately and committed
|
||||
to the repository in `complete/_rg`.
|
||||
|
||||
Shell completions are also included in all
|
||||
[ripgrep binary releases](https://github.com/BurntSushi/ripgrep/releases).
|
||||
|
||||
For **bash**, move `rg.bash` to
|
||||
`$XDG_CONFIG_HOME/bash_completion` or `/etc/bash_completion.d/`.
|
||||
|
||||
For **fish**, move `rg.fish` to `$HOME/.config/fish/completions/`.
|
||||
|
||||
For **zsh**, move `_rg` to one of your `$fpath` directories.
|
||||
|
||||
For **PowerShell**, add `. _rg.ps1` to your PowerShell
|
||||
[profile](https://technet.microsoft.com/en-us/library/bb613488(v=vs.85).aspx)
|
||||
(note the leading period). If the `_rg.ps1` file is not on your `PATH`, do
|
||||
`. /path/to/_rg.ps1` instead.
|
||||
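As a concrete sketch, assuming you've extracted a release archive that ships
the completion files in a `complete/` directory (the destination paths below
are illustrative; adjust them for your system):

```
$ # bash
$ sudo cp complete/rg.bash /etc/bash_completion.d/
$ # fish
$ cp complete/rg.fish $HOME/.config/fish/completions/
$ # zsh (any directory on your $fpath works)
$ cp complete/_rg $HOME/.zsh-completions/
```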
|
||||
|
||||
<h3 name="order">
|
||||
How can I get results in a consistent order?
|
||||
</h3>
|
||||
|
||||
By default, ripgrep uses parallelism to execute its search because this makes
|
||||
the search much faster on most modern systems. This in turn means that ripgrep
|
||||
has a non-deterministic aspect to it, since the interleaving of threads during
|
||||
the execution of the program is itself non-deterministic. This has the effect
|
||||
of printing results in a somewhat arbitrary order, and this order can change
|
||||
from run to run of ripgrep.
|
||||
|
||||
The only way to make the order of results consistent is to ask ripgrep to
|
||||
sort the output. Currently, this will disable all parallelism. (On smaller
|
||||
repositories, you might not notice much of a performance difference!) You
|
||||
can achieve this with the `--sort-files` flag.
|
||||
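For example (the pattern and path here are placeholders):

```
$ rg --sort-files somepattern ./
```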
|
||||
There is more discussion on this topic here:
|
||||
https://github.com/BurntSushi/ripgrep/issues/152
|
||||
|
||||
|
||||
<h3 name="encoding">
|
||||
How do I search files that aren't UTF-8?
|
||||
</h3>
|
||||
|
||||
See the [guide's section on file encoding](GUIDE.md#file-encoding).
|
||||
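As a minimal sketch, the `-E/--encoding` flag is the relevant knob; the
encoding label and file name below are illustrative:

```
$ rg -E utf-16 somepattern some-utf16-file.txt
```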
|
||||
|
||||
<h3 name="compressed">
|
||||
How do I search compressed files?
|
||||
</h3>
|
||||
|
||||
ripgrep's `-z/--search-zip` flag will cause it to search compressed files
|
||||
automatically. Currently, this supports gzip, bzip2, lzma and xz only and
|
||||
requires the corresponding `gzip`, `bzip2` and `xz` binaries to be installed on
|
||||
your system. (That is, ripgrep does decompression by shelling out to another
|
||||
process.)
|
||||
|
||||
ripgrep currently does not search archive formats, so `*.tar.gz` files, for
|
||||
example, are skipped.
|
||||
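For example, assuming a gzip-compressed log file (the file name is
hypothetical):

```
$ rg -z somepattern access.log.gz
```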
|
||||
|
||||
<h3 name="multiline">
|
||||
How do I search over multiple lines?
|
||||
</h3>
|
||||
|
||||
This isn't currently possible. ripgrep is fundamentally a line-oriented search
|
||||
tool. With that said,
|
||||
[multiline search is a planned opt-in feature](https://github.com/BurntSushi/ripgrep/issues/176).
|
||||
|
||||
|
||||
<h3 name="fancy">
|
||||
How do I use lookaround and/or backreferences?
|
||||
</h3>
|
||||
|
||||
This isn't currently possible. ripgrep uses finite automata to implement
|
||||
regular expression search, and in turn, guarantees linear time searching on all
|
||||
inputs. It is difficult to efficiently support lookaround and backreferences in
|
||||
finite automata engines, so ripgrep does not provide these features.
|
||||
|
||||
If a production quality regular expression engine with these features is ever
|
||||
written in Rust, then it is possible ripgrep will provide it as an opt-in
|
||||
feature.
|
||||
|
||||
|
||||
<h3 name="colors">
|
||||
How do I configure ripgrep's colors?
|
||||
</h3>
|
||||
|
||||
ripgrep has two flags related to colors:
|
||||
|
||||
* `--color` controls *when* to use colors.
|
||||
* `--colors` controls *which* colors to use.
|
||||
|
||||
The `--color` flag accepts one of the following possible values: `never`,
|
||||
`auto`, `always` or `ansi`. The `auto` value is the default and will cause
|
||||
ripgrep to only enable colors when it is printing to a terminal. But if you
|
||||
pipe ripgrep to a file or some other process, then it will suppress colors.
|
||||
|
||||
The `--colors` flag is a bit more complicated. The general format is:
|
||||
|
||||
```
|
||||
--colors '{type}:{attribute}:{value}'
|
||||
```
|
||||
|
||||
* `{type}` should be one of `path`, `line`, `column` or `match`. Each of these
|
||||
corresponds to one of the four different types of things that ripgrep will add color
|
||||
to in its output. Select the type whose color you want to change.
|
||||
* `{attribute}` should be one of `fg`, `bg` or `style`, corresponding to
|
||||
foreground color, background color, or miscellaneous styling (such as whether
|
||||
to bold the output or not).
|
||||
* `{value}` is determined by the value of `{attribute}`. If
|
||||
`{attribute}` is `style`, then `{value}` should be one of `nobold`,
|
||||
`bold`, `nointense`, `intense`, `nounderline` or `underline`. If
|
||||
`{attribute}` is `fg` or `bg`, then `{value}` should be a color.
|
||||
|
||||
A color is specified by one of eight English names, a single number (from the
256-color palette) or an RGB triple (with over 16 million possible values, or
"true color").
|
||||
|
||||
The color names are `red`, `blue`, `green`, `cyan`, `magenta`, `yellow`,
|
||||
`white` or `black`.
|
||||
|
||||
A single number is a value in the range 0-255 (inclusive). It can
|
||||
either be in decimal format (e.g., `62`) or hexadecimal format (e.g., `0x3E`).
|
||||
|
||||
An RGB triple corresponds to three numbers (decimal or hexadecimal) separated
|
||||
by commas.
|
||||
|
||||
As a special case, `--colors '{type}:none'` will clear all colors and styles
|
||||
associated with `{type}`, which lets you start with a clean slate (instead of
|
||||
building on top of ripgrep's default color settings).
|
||||
|
||||
Here's an example that highlights matches with a nice blue background and
bolded white text:
|
||||
|
||||
```
|
||||
$ rg somepattern \
|
||||
--colors 'match:none' \
|
||||
--colors 'match:bg:0x33,0x66,0xFF' \
|
||||
--colors 'match:fg:white' \
|
||||
--colors 'match:style:bold'
|
||||
```
|
||||
|
||||
Colors are an ideal candidate to set in your
|
||||
[configuration file](GUIDE.md#configuration-file). See the
|
||||
[question on emulating The Silver Searcher's output style](#silver-searcher-output)
|
||||
for an example specific to colors.
|
||||
|
||||
|
||||
<h3 name="truecolors-windows">
|
||||
How do I enable true colors on Windows?
|
||||
</h3>
|
||||
|
||||
First, see the previous question's
|
||||
[answer on configuring colors](#colors).
|
||||
|
||||
Secondly, coloring on Windows is a bit complicated. If you're using a terminal
|
||||
like Cygwin, then it's likely true color support already works out of the box.
|
||||
However, if you are using a normal Windows console (`cmd` or `PowerShell`) and
|
||||
a version of Windows prior to 10, then there is no known way to get true
|
||||
color support. If you are on Windows 10 and using a Windows console, then
|
||||
true colors should work out of the box with one caveat: you might need to
|
||||
clear ripgrep's default color settings first. That is, instead of this:
|
||||
|
||||
```
|
||||
$ rg somepattern --colors 'match:fg:0x33,0x66,0xFF'
|
||||
```
|
||||
|
||||
you should do this:
|
||||
|
||||
```
|
||||
$ rg somepattern --colors 'match:none' --colors 'match:fg:0x33,0x66,0xFF'
|
||||
```
|
||||
|
||||
This is because ripgrep might set the default style for `match` to `bold`, and
|
||||
it seems like Windows 10's VT100 support doesn't permit bold and true color
|
||||
ANSI escapes to be used simultaneously. The work-around above will clear
|
||||
ripgrep's default styling, allowing you to craft it exactly as desired.
|
||||
|
||||
|
||||
<h3 name="stop-ripgrep">
|
||||
How do I stop ripgrep from messing up colors when I kill it?
|
||||
</h3>
|
||||
|
||||
Type `color` in cmd.exe (Command Prompt), or run `echo -ne "\033[0m"` on
Unix-like systems, to restore your original foreground color.
|
||||
|
||||
In PowerShell, you can add the following code to your profile which will
|
||||
restore the original foreground color when `Reset-ForegroundColor` is called.
|
||||
Including the `Set-Alias` line will allow you to call it with simply `color`.
|
||||
|
||||
```powershell
|
||||
$OrigFgColor = $Host.UI.RawUI.ForegroundColor
|
||||
function Reset-ForegroundColor {
|
||||
$Host.UI.RawUI.ForegroundColor = $OrigFgColor
|
||||
}
|
||||
Set-Alias -Name color -Value Reset-ForegroundColor
|
||||
```
|
||||
|
||||
PR [#187](https://github.com/BurntSushi/ripgrep/pull/187) fixed this, and it
|
||||
was later deprecated in
|
||||
[#281](https://github.com/BurntSushi/ripgrep/issues/281). A full explanation is
|
||||
available
|
||||
[here](https://github.com/BurntSushi/ripgrep/issues/281#issuecomment-269093893).
|
||||
|
||||
|
||||
<h3 name="size-limit">
|
||||
How do I get around the regex size limit?
|
||||
</h3>
|
||||
|
||||
If you've given ripgrep a particularly large pattern (or a large number of
|
||||
smaller patterns), then it is possible that it will fail to compile because it
|
||||
hit a pre-set limit. For example:
|
||||
|
||||
```
|
||||
$ rg '\pL{1000}'
|
||||
Compiled regex exceeds size limit of 10485760 bytes.
|
||||
```
|
||||
|
||||
(Note: `\pL{1000}` may look small, but `\pL` is the character class containing
|
||||
all Unicode letters, which is quite large. *And* it's repeated 1000 times.)
|
||||
|
||||
In this case, you can work around by simply increasing the limit:
|
||||
|
||||
```
|
||||
$ rg '\pL{1000}' --regex-size-limit 1G
|
||||
```
|
||||
|
||||
Increasing the limit to 1GB does not necessarily mean that ripgrep will use
|
||||
that much memory. The limit just says that it's allowed to (approximately) use
|
||||
that much memory for constructing the regular expression.
|
||||
|
||||
|
||||
<h3 name="dfa-size">
|
||||
How do I make the <code>-f/--file</code> flag faster?
|
||||
</h3>
|
||||
|
||||
The `-f/--file` flag permits one to give a file to ripgrep which contains a pattern
|
||||
on each line. ripgrep will then report any line that matches any of the
|
||||
patterns.
|
||||
|
||||
If this pattern file gets too big, then it is possible ripgrep will slow down
|
||||
dramatically. *Typically* this is because an internal cache is too small, and
|
||||
will cause ripgrep to spill over to a slower but more robust regular expression
|
||||
engine. If this is indeed the problem, then it is possible to increase this
|
||||
cache and regain speed. The cache can be controlled via the `--dfa-size-limit`
|
||||
flag. For example, using `--dfa-size-limit 1G` will set the cache size to 1GB.
|
||||
(Note that this doesn't mean ripgrep will use 1GB of memory automatically, but
|
||||
it will allow the regex engine to if it needs to.)
|
||||
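Putting that together, a sketch might look like this, where `patterns.txt` is
a hypothetical file containing one pattern per line:

```
$ rg -f patterns.txt --dfa-size-limit 1G ./
```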
|
||||
|
||||
<h3 name="silver-searcher-output">
|
||||
How do I make the output look like The Silver Searcher's output?
|
||||
</h3>
|
||||
|
||||
Use the `--colors` flag, like so:
|
||||
|
||||
```
|
||||
rg --colors line:fg:yellow \
|
||||
--colors line:style:bold \
|
||||
--colors path:fg:green \
|
||||
--colors path:style:bold \
|
||||
--colors match:fg:black \
|
||||
--colors match:bg:yellow \
|
||||
--colors match:style:nobold \
|
||||
foo
|
||||
```
|
||||
|
||||
Alternatively, add your color configuration to your ripgrep config file (which
|
||||
is activated by setting the `RIPGREP_CONFIG_PATH` environment variable to point
|
||||
to your config file). For example:
|
||||
|
||||
```
|
||||
$ cat $HOME/.config/ripgrep/rc
|
||||
--colors=line:fg:yellow
|
||||
--colors=line:style:bold
|
||||
--colors=path:fg:green
|
||||
--colors=path:style:bold
|
||||
--colors=match:fg:black
|
||||
--colors=match:bg:yellow
|
||||
--colors=match:style:nobold
|
||||
$ RIPGREP_CONFIG_PATH=$HOME/.config/ripgrep/rc rg foo
|
||||
```
|
||||
|
||||
|
||||
<h3 name="rg-other-cmd">
|
||||
When I run <code>rg</code>, why does it execute some other command?
|
||||
</h3>
|
||||
|
||||
It's likely that you have a shell alias or even another tool called `rg` which
|
||||
is interfering with ripgrep. Run `which rg` to see what it is.
|
||||
|
||||
(Notably, the Rails plug-in for
|
||||
[Oh My Zsh](https://github.com/robbyrussell/oh-my-zsh/wiki/Plugins#rails) sets
|
||||
up an `rg` alias for `rails generate`.)
|
||||
|
||||
Problems like this can be resolved in one of several ways:
|
||||
|
||||
* If you're using the OMZ Rails plug-in, disable it by editing the `plugins`
|
||||
array in your zsh configuration.
|
||||
* Temporarily bypass an existing `rg` alias by calling ripgrep as
|
||||
`command rg`, `\rg`, or `'rg'`.
|
||||
* Temporarily bypass an existing alias or another tool named `rg` by calling
|
||||
ripgrep by its full path (e.g., `/usr/bin/rg` or `/usr/local/bin/rg`).
|
||||
* Permanently disable an existing `rg` alias by adding `unalias rg` to the
|
||||
bottom of your shell configuration file (e.g., `.bash_profile` or `.zshrc`).
|
||||
* Give ripgrep its own alias that doesn't conflict with other tools/aliases by
|
||||
adding a line like the following to the bottom of your shell configuration
|
||||
file: `alias ripgrep='command rg'`.
|
||||
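For example, a quick way to diagnose and work around a conflicting alias (a
sketch; `foo` is a placeholder pattern):

```
$ which rg        # see what actually runs when you type `rg`
$ command rg foo  # bypass any shell alias for this one invocation
```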
|
||||
|
||||
<h3 name="rg-alias-windows">
|
||||
How do I create an alias for ripgrep on Windows?
|
||||
</h3>
|
||||
|
||||
You may often find a need to make an alias for a command you use a lot with
certain flags set. But PowerShell function aliases do not behave like your
typical Linux shell aliases: you always need to propagate arguments and `stdin`
input, and it cannot be done as simply as
`function grep() { $input | rg.exe --hidden $args }`.
|
||||
|
||||
Use the example below as a reference for how to set up an alias in PowerShell.
|
||||
|
||||
```powershell
|
||||
function grep {
|
||||
$count = @($input).Count
|
||||
$input.Reset()
|
||||
|
||||
if ($count) {
|
||||
$input | rg.exe --hidden $args
|
||||
}
|
||||
else {
|
||||
rg.exe --hidden $args
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
PowerShell special variables:
|
||||
|
||||
* `input` - the PowerShell `stdin` object, which allows you to access its content.
* `args` - the array of arguments passed to this function.

This alias checks whether there is `stdin` input and propagates it only if there
are some lines. Otherwise, an empty `$input` would make PowerShell trigger `rg`
to search an empty `stdin`.
|
||||
|
||||
|
||||
<h3 name="powershell-profile">
|
||||
How do I create a PowerShell profile?
|
||||
</h3>
|
||||
|
||||
To customize PowerShell on start-up, a special PowerShell script has to be
created. To find its location, type `$profile`.
|
||||
See
|
||||
[Microsoft's documentation](https://technet.microsoft.com/en-us/library/bb613488(v=vs.85).aspx)
|
||||
for more details.
|
||||
|
||||
Any PowerShell code in this file gets evaluated at the start of a console
session. This way you can have your own aliases created at start-up.
|
||||
|
||||
|
||||
<h3 name="pipe-non-ascii-windows">
|
||||
How do I pipe non-ASCII content to ripgrep on Windows?
|
||||
</h3>
|
||||
|
||||
When piping input into native executables in PowerShell, the encoding of the
|
||||
input is controlled by the `$OutputEncoding` variable. By default, this is set
|
||||
to US-ASCII, and any characters in the pipeline that don't have encodings in
|
||||
US-ASCII are converted to `?` (question mark) characters.
|
||||
|
||||
To change this setting, set `$OutputEncoding` to a different encoding, as
|
||||
represented by a .NET encoding object. Some common examples are below. The
|
||||
value of this variable is reset when PowerShell restarts, so to make this
|
||||
change take effect every time PowerShell is started add a line setting the
|
||||
variable into your PowerShell profile.
|
||||
|
||||
Example `$OutputEncoding` settings:
|
||||
|
||||
* UTF-8 without BOM: `$OutputEncoding = [System.Text.UTF8Encoding]::new()`
|
||||
* The console's output encoding:
|
||||
`$OutputEncoding = [System.Console]::OutputEncoding`
|
||||
|
||||
If you continue to have encoding problems, you can also force the encoding
|
||||
that the console will use for printing to UTF-8 with
|
||||
`[System.Console]::OutputEncoding = [System.Text.Encoding]::UTF8`. This
|
||||
will also reset when PowerShell is restarted, so you can add that line
|
||||
to your profile as well if you want to make the setting permanent.
|
||||
|
||||
|
||||
<h3 name="license">
|
||||
How is ripgrep licensed?
|
||||
</h3>
|
||||
|
||||
ripgrep is dual licensed under the
|
||||
[Unlicense](https://unlicense.org/)
|
||||
and MIT licenses. Specifically, you may use ripgrep under the terms of either
|
||||
license.
|
||||
|
||||
The reason why ripgrep is dual licensed this way is two-fold:
|
||||
|
||||
1. I, as ripgrep's author, would like to participate in a small bit of
|
||||
ideological activism by promoting the Unlicense's goal: to disclaim
|
||||
copyright monopoly interest.
|
||||
2. I, as ripgrep's author, would like as many people to use ripgrep as
|
||||
possible. Since the Unlicense is not a proven or well known license, ripgrep
|
||||
is also offered under the MIT license, which is ubiquitous and accepted by
|
||||
almost everyone.
|
||||
|
||||
More specifically, ripgrep and all its dependencies are compatible with this
|
||||
licensing choice. In particular, ripgrep's dependencies (direct and transitive)
|
||||
will always be limited to permissive licenses. That is, ripgrep will never
|
||||
depend on code that is not permissively licensed. This means rejecting any
|
||||
dependency that uses a copyleft license such as the GPL, LGPL, MPL or any of
|
||||
the Creative Commons ShareAlike licenses. Whether the license is "weak"
|
||||
copyleft or not does not matter; ripgrep will **not** depend on it.
|
||||
|
||||
|
||||
<h3 name="posix4ever">
|
||||
Can ripgrep replace grep?
|
||||
</h3>
|
||||
|
||||
Yes and no.
|
||||
|
||||
If, upon hearing that "ripgrep can replace grep," you *actually* hear, "ripgrep
|
||||
can be used in every instance grep can be used, in exactly the same way, for
|
||||
the same use cases, with exactly the same bug-for-bug behavior," then no,
|
||||
ripgrep trivially *cannot* replace grep. Moreover, ripgrep will *never* replace
|
||||
grep.
|
||||
|
||||
If, upon hearing that "ripgrep can replace grep," you *actually* hear, "ripgrep
|
||||
can replace grep in some cases and not in other use cases," then yes, that is
|
||||
indeed true!
|
||||
|
||||
Let's go over some of those use cases in favor of ripgrep. Some of these may
|
||||
not apply to you. That's OK. There may be other use cases not listed here that
|
||||
do apply to you. That's OK too.
|
||||
|
||||
(For all claims related to performance in the following words, see my
|
||||
[blog post](https://blog.burntsushi.net/ripgrep/)
|
||||
introducing ripgrep.)
|
||||
|
||||
* Are you frequently searching a repository of code? If so, ripgrep might be a
|
||||
good choice since there's likely a good chunk of your repository that you
|
||||
don't want to search. grep can, of course, be made to filter files using
|
||||
recursive search, and if you don't mind writing out the requisite `--exclude`
|
||||
rules or writing wrapper scripts, then grep might be sufficient. (I'm not
|
||||
kidding, I myself did this with grep for almost a decade before writing
|
||||
ripgrep.) But if you instead enjoy having a search tool respect your
|
||||
`.gitignore`, then ripgrep might be perfect for you!
|
||||
* Are you frequently searching non-ASCII text that is UTF-8 encoded? One of
|
||||
ripgrep's key features is that it can handle Unicode features in your
|
||||
patterns in a way that tends to be faster than GNU grep. Unicode features
|
||||
in ripgrep are enabled by default; there is no need to configure your locale
|
||||
settings to use ripgrep properly because ripgrep doesn't respect your locale
|
||||
settings.
|
||||
* Do you need to search UTF-16 files and you don't want to bother explicitly
|
||||
transcoding them? Great. ripgrep does this for you automatically. No need
|
||||
to enable it.
|
||||
* Do you need to search a large directory of large files? ripgrep uses
|
||||
parallelism by default, which tends to make it faster than a standard
|
||||
`grep -r` search. However, if you're OK writing the occasional
|
||||
`find ./ -print0 | xargs -P8 -0 grep` command, then maybe grep is good
|
||||
enough.
|
||||
|
||||
Here are some cases where you might *not* want to use ripgrep. The same caveats
|
||||
for the previous section apply.
|
||||
|
||||
* Are you writing portable shell scripts intended to work in a variety of
|
||||
environments? Great, probably not a good idea to use ripgrep! ripgrep has
nowhere near the ubiquity of grep, so if you do use ripgrep, you might need
|
||||
to futz with the installation process more than you would with grep.
|
||||
* Do you care about POSIX compatibility? If so, then you can't use ripgrep
|
||||
because it never was, isn't and never will be POSIX compatible.
|
||||
* Do you hate tools that try to do something smart? If so, ripgrep is all about
|
||||
being smart, so you might prefer to just stick with grep.
|
||||
* Is there a particular feature of grep you rely on that ripgrep either doesn't
|
||||
have or never will have? If the former, file a bug report, maybe ripgrep can
|
||||
do it! If the latter, well, then, just use grep.
|
||||
|
||||
|
||||
<h3 name="intentcountsforsomething">
|
||||
What does the "rip" in ripgrep mean?
|
||||
</h3>
|
||||
|
||||
When I first started writing ripgrep, I called it `rep`, intending it to be a
|
||||
shorter variant of `grep`. Soon after, I renamed it to `xrep` since `rep`
|
||||
wasn't obvious enough of a name for my taste. And also because adding `x` to
|
||||
anything always makes it better, right?
|
||||
|
||||
Before ripgrep's first public release, I decided that I didn't like `xrep`. I
|
||||
thought it was slightly awkward to type, and despite my previous praise of the
|
||||
letter `x`, I kind of thought it was pretty lame. Being someone who really
|
||||
likes Rust, I wanted to call it "rustgrep" or maybe "rgrep" for short. But I
|
||||
thought that was just as lame, and maybe a little too in-your-face. But I
|
||||
wanted to continue using `r` so I could at least pretend Rust had something to
|
||||
do with it.
|
||||
|
||||
I spent a couple of days trying to think of very short words that began with
|
||||
the letter `r` that were even somewhat related to the task of searching. I
|
||||
don't remember how it popped into my head, but "rip" came up as something that
|
||||
meant "fast," as in, "to rip through your text." The fact that RIP is also
|
||||
an initialism for "Rest in Peace" (as in, "ripgrep kills grep") never really
|
||||
dawned on me. Perhaps the coincidence is too striking to believe that, but
|
||||
I didn't realize it until someone explicitly pointed it out to me after the
|
||||
initial public release. I admit that I found it mildly amusing, but if I had
|
||||
realized it myself before the public release, I probably would have pressed on
|
||||
and chose a different name. Alas, renaming things after a release is hard, so I
|
||||
decided to mush on.
|
||||
|
||||
Given the fact that
|
||||
[ripgrep never was, is or will be a 100% drop-in replacement for
|
||||
grep](#posix4ever),
|
||||
ripgrep is neither actually a "grep killer" nor was it ever intended to be. It
|
||||
certainly does eat into some of its use cases, but that's nothing that other
|
||||
tools like ack or The Silver Searcher weren't already doing.
|
||||
680 GUIDE.md Normal file
@@ -0,0 +1,680 @@
|
||||
## User Guide
|
||||
|
||||
This guide is intended to give an elementary description of ripgrep and an
|
||||
overview of its capabilities. This guide assumes that ripgrep is
|
||||
[installed](README.md#installation)
|
||||
and that readers have passing familiarity with using command line tools. This
|
||||
also assumes a Unix-like system, although most commands are probably easily
|
||||
translatable to any command line shell environment.
|
||||
|
||||
|
||||
### Table of Contents
|
||||
|
||||
* [Basics](#basics)
|
||||
* [Recursive search](#recursive-search)
|
||||
* [Automatic filtering](#automatic-filtering)
|
||||
* [Manual filtering: globs](#manual-filtering-globs)
|
||||
* [Manual filtering: file types](#manual-filtering-file-types)
|
||||
* [Replacements](#replacements)
|
||||
* [Configuration file](#configuration-file)
|
||||
* [File encoding](#file-encoding)
|
||||
* [Common options](#common-options)
|
||||
|
||||
|
||||
### Basics
|
||||
|
||||
ripgrep is a command line tool that searches your files for patterns that
|
||||
you give it. ripgrep behaves as if reading each file line by line. If a line
|
||||
matches the pattern provided to ripgrep, then that line will be printed. If a
|
||||
line does not match the pattern, then the line is not printed.
|
||||
|
||||
The best way to see how this works is with an example. To show an example, we
|
||||
need something to search. Let's try searching ripgrep's source code. First
|
||||
grab a ripgrep source archive from
|
||||
https://github.com/BurntSushi/ripgrep/archive/0.7.1.zip
|
||||
and extract it:
|
||||
|
||||
```
|
||||
$ curl -LO https://github.com/BurntSushi/ripgrep/archive/0.7.1.zip
|
||||
$ unzip 0.7.1.zip
|
||||
$ cd ripgrep-0.7.1
|
||||
$ ls
|
||||
benchsuite grep tests Cargo.toml LICENSE-MIT
|
||||
ci ignore wincolor CHANGELOG.md README.md
|
||||
complete pkg appveyor.yml compile snapcraft.yaml
|
||||
doc src build.rs COPYING UNLICENSE
|
||||
globset termcolor Cargo.lock HomebrewFormula
|
||||
```
|
||||
|
||||
Let's try our first search by looking for all occurrences of the word `fast`
|
||||
in `README.md`:
|
||||
|
||||
```
|
||||
$ rg fast README.md
|
||||
75: faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
|
||||
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast while
|
||||
119:### Is it really faster than everything else?
|
||||
124:Summarizing, `ripgrep` is fast because:
|
||||
129: optimizations to make searching very fast.
|
||||
```
|
||||
|
||||
(**Note:** If you see an error message from ripgrep saying that it didn't
|
||||
search any files, then re-run ripgrep with the `--debug` flag. One likely cause
|
||||
of this is that you have a `*` rule in a `$HOME/.gitignore` file.)
|
||||
|
||||
So what happened here? ripgrep read the contents of `README.md`, and for each
|
||||
line that contained `fast`, ripgrep printed it to your terminal. ripgrep also
|
||||
included the line number for each line by default. If your terminal supports
|
||||
colors, then your output might actually look something like this screenshot:
|
||||
|
||||
[](https://burntsushi.net/stuff/ripgrep-guide-sample.png)
|
||||
|
||||
In this example, we searched for something called a "literal" string. This
|
||||
means that our pattern was just some normal text that we asked ripgrep to
|
||||
find. But ripgrep supports the ability to specify patterns via [regular
|
||||
expressions](https://en.wikipedia.org/wiki/Regular_expression). As an example,
|
||||
what if we wanted to find all lines that have a word that contains `fast` followed
|
||||
by some number of other letters?
|
||||
|
||||
```
|
||||
$ rg 'fast\w+' README.md
|
||||
75: faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
|
||||
119:### Is it really faster than everything else?
|
||||
```
|
||||
|
||||
In this example, we used the pattern `fast\w+`. This pattern tells ripgrep to
|
||||
look for any lines containing the letters `fast` followed by *one or more*
|
||||
word-like characters. Namely, `\w` matches characters that compose words (like
|
||||
`a` and `L` but unlike `.` and ` `). The `+` after the `\w` means, "match the
|
||||
previous pattern one or more times." This means that the word `fast` won't
|
||||
match because there are no word characters following the final `t`. But a word
|
||||
like `faster` will. `faste` would also match!
|
||||
|
||||
Here's a different variation on this same theme:
|
||||
|
||||
```
|
||||
$ rg 'fast\w*' README.md
|
||||
75: faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
|
||||
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast while
|
||||
119:### Is it really faster than everything else?
|
||||
124:Summarizing, `ripgrep` is fast because:
|
||||
129: optimizations to make searching very fast.
|
||||
```
|
||||
|
||||
In this case, we used `fast\w*` for our pattern instead of `fast\w+`. The `*`
|
||||
means that it should match *zero* or more times. In this case, ripgrep will
|
||||
print the same lines as the pattern `fast`, but if your terminal supports
|
||||
colors, you'll notice that `faster` will be highlighted instead of just the
|
||||
`fast` prefix.
|
||||
|
||||
It is beyond the scope of this guide to provide a full tutorial on regular
|
||||
expressions, but ripgrep's specific syntax is documented here:
|
||||
https://docs.rs/regex/0.2.5/regex/#syntax
|
||||
|
||||
|
||||
### Recursive search
|
||||
|
||||
In the previous section, we showed how to use ripgrep to search a single file.
|
||||
In this section, we'll show how to use ripgrep to search an entire directory
|
||||
of files. In fact, *recursively* searching your current working directory is
|
||||
the default mode of operation for ripgrep, which means doing this is very
|
||||
simple.
|
||||
|
||||
Using our unzipped archive of ripgrep source code, here's how to find all
|
||||
function definitions whose name is `write`:
|
||||
|
||||
```
|
||||
$ rg 'fn write\('
|
||||
src/printer.rs
|
||||
469: fn write(&mut self, buf: &[u8]) {
|
||||
|
||||
termcolor/src/lib.rs
|
||||
227: fn write(&mut self, b: &[u8]) -> io::Result<usize> {
|
||||
250: fn write(&mut self, b: &[u8]) -> io::Result<usize> {
|
||||
428: fn write(&mut self, b: &[u8]) -> io::Result<usize> { self.wtr.write(b) }
|
||||
441: fn write(&mut self, b: &[u8]) -> io::Result<usize> { self.wtr.write(b) }
|
||||
454: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
511: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
848: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
915: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
949: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
1114: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
1348: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
1353: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
```
|
||||
|
||||
(**Note:** We escape the `(` here because `(` has special significance inside
|
||||
regular expressions. You could also use `rg -F 'fn write('` to achieve the
|
||||
same thing, where `-F` interprets your pattern as a literal string instead of
|
||||
a regular expression.)
|
||||
|
||||
In this example, we didn't specify a file at all. Instead, ripgrep defaulted
|
||||
to searching your current directory in the absence of a path. In general,
|
||||
`rg foo` is equivalent to `rg foo ./`.
|
||||
|
||||
This particular search showed us results in both the `src` and `termcolor`
|
||||
directories. The `src` directory is the core ripgrep code whereas `termcolor`
|
||||
is a dependency of ripgrep (and is used by other tools). What if we only wanted
|
||||
to search core ripgrep code? Well, that's easy, just specify the directory you
|
||||
want:
|
||||
|
||||
```
|
||||
$ rg 'fn write\(' src
|
||||
src/printer.rs
|
||||
469: fn write(&mut self, buf: &[u8]) {
|
||||
```
|
||||
|
||||
Here, ripgrep limited its search to the `src` directory. Another way of doing
|
||||
this search would be to `cd` into the `src` directory and simply use `rg 'fn
|
||||
write\('` again.
|
||||
|
||||
|
||||
### Automatic filtering
|
||||
|
||||
After recursive search, ripgrep's most important feature is what it *doesn't*
|
||||
search. By default, when you search a directory, ripgrep will ignore all of
|
||||
the following:
|
||||
|
||||
1. Files and directories that match the rules in your `.gitignore` glob
|
||||
pattern.
|
||||
2. Hidden files and directories.
|
||||
3. Binary files. (ripgrep considers any file with a `NUL` byte to be binary.)
|
||||
4. Symbolic links aren't followed.
|
||||
|
||||
All of these things can be toggled using various flags provided by ripgrep:
|
||||
|
||||
1. You can disable `.gitignore` handling with the `--no-ignore` flag.
|
||||
2. Hidden files and directories can be searched with the `--hidden` flag.
|
||||
3. Binary files can be searched via the `--text` (`-a` for short) flag.
|
||||
Be careful with this flag! Binary files may emit control characters to your
|
||||
terminal, which might cause strange behavior.
|
||||
4. ripgrep can follow symlinks with the `--follow` (`-L` for short) flag.
|
||||
|
||||
As a special convenience, ripgrep also provides a flag called `--unrestricted`
|
||||
(`-u` for short). Repeated uses of this flag will cause ripgrep to disable
|
||||
more and more of its filtering. That is, `-u` will disable `.gitignore`
|
||||
handling, `-uu` will search hidden files and directories and `-uuu` will search
|
||||
binary files. This is useful when you're using ripgrep and you aren't sure
|
||||
whether its filtering is hiding results from you. Tacking on a couple `-u`
|
||||
flags is a quick way to find out. (Use the `--debug` flag if you're still
|
||||
perplexed, and if that doesn't help,
|
||||
[file an issue](https://github.com/BurntSushi/ripgrep/issues/new).)
|
||||
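For example, based on the description above, the following two commands should
be roughly equivalent (the pattern `foo` is a placeholder):

```
$ rg -uu foo
$ rg --no-ignore --hidden foo
```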
|
||||
ripgrep's `.gitignore` handling actually goes a bit beyond just `.gitignore`
|
||||
files. ripgrep will also respect repository specific rules found in
|
||||
`$GIT_DIR/info/exclude`, as well as any global ignore rules in your
|
||||
`core.excludesFile` (which is usually `$XDG_CONFIG_HOME/git/ignore` on
|
||||
Unix-like systems).
|
||||
|
||||
Sometimes you want to search files that are in your `.gitignore`, so it is
|
||||
possible to specify additional ignore rules or overrides in a `.ignore`
|
||||
(application agnostic) or `.rgignore` (ripgrep specific) file.
|
||||
|
||||
For example, let's say you have a `.gitignore` file that looks like this:
|
||||
|
||||
```
|
||||
log/
|
||||
```
|
||||
|
||||
This generally means that any `log` directory won't be tracked by `git`.
|
||||
However, perhaps it contains useful output that you'd like to include in your
|
||||
searches, but you still don't want to track it in `git`. You can achieve this
|
||||
by creating a `.ignore` file in the same directory as the `.gitignore` file
|
||||
with the following contents:
|
||||
|
||||
```
|
||||
!log/
|
||||
```
|
||||
|
||||
ripgrep treats `.ignore` files with higher precedence than `.gitignore` files
|
||||
(and treats `.rgignore` files with higher precedence than `.ignore` files).
|
||||
This means ripgrep will see the `!log/` whitelist rule first and search that
|
||||
directory.
|
||||
|
||||
Like `.gitignore`, a `.ignore` file can be placed in any directory. Its rules
|
||||
will be processed with respect to the directory it resides in, just like
|
||||
`.gitignore`.
|
||||
|
||||
For a more in depth description of how glob patterns in a `.gitignore` file
|
||||
are interpreted, please see `man gitignore`.
|
||||
|
||||
|
||||
### Manual filtering: globs
|
||||
|
||||
In the previous section, we talked about ripgrep's filtering that it does by
|
||||
default. It is "automatic" because it reacts to your environment. That is, it
|
||||
uses already existing `.gitignore` files to produce more relevant search
|
||||
results.
|
||||
|
||||
In addition to automatic filtering, ripgrep also provides more manual or ad hoc
|
||||
filtering. This comes in two varieties: additional glob patterns specified in
|
||||
your ripgrep commands and file type filtering. This section covers glob
|
||||
patterns while the next section covers file type filtering.
|
||||
|
||||
In our ripgrep source code (see [Basics](#basics) for instructions on how to
|
||||
get a source archive to search), let's say we wanted to see which things depend
|
||||
on `clap`, our argument parser.
|
||||
|
||||
We could do this:
|
||||
|
||||
```
|
||||
$ rg clap
|
||||
[lots of results]
|
||||
```
|
||||
|
||||
But this shows us many things, and we're only interested in where we wrote
|
||||
`clap` as a dependency. Instead, we could limit ourselves to TOML files, which
|
||||
is how dependencies are communicated to Rust's build tool, Cargo:
|
||||
|
||||
```
|
||||
$ rg clap -g '*.toml'
|
||||
Cargo.toml
|
||||
35:clap = "2.26"
|
||||
51:clap = "2.26"
|
||||
```
|
||||
|
||||
The `-g '*.toml'` syntax says, "make sure every file searched matches this
|
||||
glob pattern." Note that we put `'*.toml'` in single quotes to prevent our
|
||||
shell from expanding the `*`.
|
||||
|
||||
If we wanted, we could tell ripgrep to search anything *but* `*.toml` files:
|
||||
|
||||
```
|
||||
$ rg clap -g '!*.toml'
|
||||
[lots of results]
|
||||
```
|
||||
|
||||
This will give you a lot of results again as above, but they won't include
|
||||
files ending with `.toml`. Note that the use of a `!` here to mean "negation"
|
||||
is a bit non-standard, but it was chosen to be consistent with how globs in
|
||||
`.gitignore` files are written. (Although, the meaning is reversed. In
|
||||
`.gitignore` files, a `!` prefix means whitelist, and on the command line, a
|
||||
`!` means blacklist.)
|
||||
|
||||
Globs are interpreted in exactly the same way as `.gitignore` patterns. That
|
||||
is, later globs will override earlier globs. For example, the following command
|
||||
will search only `*.toml` files:
|
||||
|
||||
```
|
||||
$ rg clap -g '!*.toml' -g '*.toml'
|
||||
```
|
||||
|
||||
Interestingly, reversing the order of the globs in this case will match
|
||||
nothing, since the presence of at least one non-blacklist glob will institute a
|
||||
requirement that every file searched must match at least one glob. In this
|
||||
case, the blacklist glob takes precedence over the previous glob and prevents
|
||||
any file from being searched at all!
|
||||
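For example, this command reverses the glob order from the one above, so the
blacklist glob wins and nothing is searched:

```
$ rg clap -g '*.toml' -g '!*.toml'
```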
|
||||
|
||||
### Manual filtering: file types
|
||||
|
||||
Over time, you might notice that you use the same glob patterns over and over.
|
||||
For example, you might find yourself doing a lot of searches where you only
|
||||
want to see results for Rust files:
|
||||
|
||||
```
|
||||
$ rg 'fn run' -g '*.rs'
|
||||
```
|
||||
|
||||
Instead of writing out the glob every time, you can use ripgrep's support for
|
||||
file types:
|
||||
|
||||
```
|
||||
$ rg 'fn run' --type rust
|
||||
```
|
||||
|
||||
or, more succinctly,
|
||||
|
||||
```
|
||||
$ rg 'fn run' -trust
|
||||
```
|
||||
|
||||
The way the `--type` flag functions is simple. It acts as a name that is
|
||||
assigned to one or more globs that match the relevant files. This lets you
|
||||
write a single type that might encompass a broad range of file extensions. For
|
||||
example, if you wanted to search C files, you'd have to check both C source
|
||||
files and C header files:
|
||||
|
||||
```
|
||||
$ rg 'int main' -g '*.{c,h}'
|
||||
```
|
||||
|
||||
or you could just use the C file type:
|
||||
|
||||
```
|
||||
$ rg 'int main' -tc
|
||||
```
|
||||
|
||||
Just as you can write blacklist globs, you can blacklist file types too:
|
||||
|
||||
```
|
||||
$ rg clap --type-not rust
|
||||
```
|
||||
|
||||
or, more succinctly,
|
||||
|
||||
```
|
||||
$ rg clap -Trust
|
||||
```
|
||||
|
||||
That is, `-t` means "include files of this type" whereas `-T` means "exclude
|
||||
files of this type."
|
||||
|
||||
To see the globs that make up a type, run `rg --type-list`:
|
||||
|
||||
```
|
||||
$ rg --type-list | rg '^make:'
|
||||
make: *.mak, *.mk, GNUmakefile, Gnumakefile, Makefile, gnumakefile, makefile
|
||||
```
|
||||
|
||||
By default, ripgrep comes with a bunch of pre-defined types. Generally, these
|
||||
types correspond to well known public formats. But you can define your own
|
||||
types as well. For example, perhaps you frequently search "web" files, which
|
||||
consist of Javascript, HTML and CSS:
|
||||
|
||||
```
|
||||
$ rg --type-add 'web:*.html' --type-add 'web:*.css' --type-add 'web:*.js' -tweb title
|
||||
```
|
||||
|
||||
or, more succinctly,
|
||||
|
||||
```
|
||||
$ rg --type-add 'web:*.{html,css,js}' -tweb title
|
||||
```
|
||||
|
||||
The above command defines a new type, `web`, corresponding to the glob
|
||||
`*.{html,css,js}`. It then applies the new filter with `-tweb` and searches for
|
||||
the pattern `title`. If you ran
|
||||
|
||||
```
|
||||
$ rg --type-add 'web:*.{html,css,js}' --type-list
|
||||
```
|
||||
|
||||
Then you would see your `web` type show up in the list, even though it is not
|
||||
part of ripgrep's built-in types.

It is important to stress here that the `--type-add` flag only applies to the
current command. It does not add a new file type and save it somewhere in a
persistent form. If you want a type to be available in every ripgrep command,
then you should either create a shell alias:

```
alias rg="rg --type-add 'web:*.{html,css,js}'"
```

or add `--type-add=web:*.{html,css,js}` to your ripgrep configuration file.
([Configuration files](#configuration-file) are covered in more detail later.)


### Replacements

ripgrep provides a limited ability to modify its output by replacing matched
text with some other text. This is easiest to explain with an example. Remember
when we searched for the word `fast` in ripgrep's README?

```
$ rg fast README.md
75: faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast while
119:### Is it really faster than everything else?
124:Summarizing, `ripgrep` is fast because:
129: optimizations to make searching very fast.
```

What if we wanted to *replace* all occurrences of `fast` with `FAST`? That's
easy with ripgrep's `--replace` flag:

```
$ rg fast README.md --replace FAST
75: FASTer than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays FAST while
119:### Is it really FASTer than everything else?
124:Summarizing, `ripgrep` is FAST because:
129: optimizations to make searching very FAST.
```

or, more succinctly,

```
$ rg fast README.md -r FAST
[snip]
```

In essence, the `--replace` flag applies *only* to the matching portion of text
in the output. If you instead wanted to replace an entire line of text, then
you need to include the entire line in your match. For example:

```
$ rg '^.*fast.*$' README.md -r FAST
75:FAST
88:FAST
119:FAST
124:FAST
129:FAST
```

Alternatively, you can combine the `--only-matching` flag (or `-o` for short)
with the `--replace` flag to achieve the same result:

```
$ rg fast README.md --only-matching --replace FAST
75:FAST
88:FAST
119:FAST
124:FAST
129:FAST
```

or, more succinctly,

```
$ rg fast README.md -or FAST
[snip]
```

Finally, replacements can include capturing groups. For example, let's say
we wanted to find all occurrences of `fast` followed by another word and
join them together with a dash. The pattern we might use for that is
`fast\s+(\w+)`, which matches `fast`, followed by any amount of whitespace,
followed by one or more "word" characters. We put the `\w+` in a "capturing
group" (indicated by parentheses) so that we can reference it later in our
replacement string. For example:

```
$ rg 'fast\s+(\w+)' README.md -r 'fast-$1'
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast-while
124:Summarizing, `ripgrep` is fast-because:
```

Our replacement string here, `fast-$1`, consists of `fast-` followed by the
contents of the capturing group at index `1`. (Capturing groups actually start
at index 0, but the `0`th capturing group always corresponds to the entire
match. The capturing group at index `1` always corresponds to the first
explicit capturing group found in the regex pattern.)
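
To see the `0`th group in action, here is a small sketch (not from the
original transcript; the output shown is what we would expect given the
matches above) that wraps the entire match in brackets by referencing `$0`
in the replacement string:

```
$ rg 'fast\s+(\w+)' README.md -r '[$0]'
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays [fast while]
124:Summarizing, `ripgrep` is [fast because]:
```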

Capturing groups can also be named, which is sometimes more convenient than
using the indices. For example, the following command is equivalent to the
above command:

```
$ rg 'fast\s+(?P<word>\w+)' README.md -r 'fast-$word'
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast-while
124:Summarizing, `ripgrep` is fast-because:
```

It is important to note that ripgrep **will never modify your files**. The
`--replace` flag only controls ripgrep's output. (And there is no flag to let
you do a replacement in a file.)


### Configuration file

It is possible that ripgrep's default options aren't suitable in every case.
For that reason, and because shell aliases aren't always convenient, ripgrep
supports configuration files.

Setting up a configuration file is simple. ripgrep will not look in any
predetermined directory for a config file automatically. Instead, you need to
set the `RIPGREP_CONFIG_PATH` environment variable to the file path of your
config file. Once the environment variable is set, open the file and just type
in the flags you want set automatically. There are only two rules for
describing the format of the config file:

1. Every line is a shell argument, after trimming ASCII whitespace.
2. Lines starting with `#` (optionally preceded by any amount of
   ASCII whitespace) are ignored.

In particular, there is no escaping. Each line is given to ripgrep as a single
command line argument verbatim.

Here's an example of a configuration file, which demonstrates some of the
formatting peculiarities:

```
$ cat $HOME/.ripgreprc
# Don't let ripgrep vomit really long lines to my terminal.
--max-columns=150

# Add my 'web' type.
--type-add
web:*.{html,css,js}

# Set the colors.
--colors=line:none
--colors=line:style:bold

# Because who cares about case!?
--smart-case
```

When we use a flag that has a value, we either put the flag and the value on
the same line but delimited by an `=` sign (e.g., `--max-columns=150`), or we
put the flag and the value on two different lines. This is because ripgrep's
argument parser knows to treat the single argument `--max-columns=150` as a
flag with a value, but if we had written `--max-columns 150` in our
configuration file, then ripgrep's argument parser wouldn't know what to do
with it.

Putting the flag and value on different lines is exactly equivalent and is a
matter of style.
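
To make the two forms concrete, here is a minimal sketch of the same setting
written both ways (the value is just an example):

```
# Form 1: flag and value as a single argument, joined by '='.
--max-columns=150

# Form 2: flag and value as two separate lines (two separate arguments).
--max-columns
150
```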

Comments are encouraged so that you remember what the config is doing. Empty
lines are OK too.

So let's say you're using the above configuration file, but while you're at a
terminal, you really want to be able to see lines longer than 150 columns. What
do you do? Thankfully, all you need to do is pass `--max-columns 0` (or `-M0`
for short) on the command line, which will override your configuration file's
setting. This works because ripgrep's configuration file is *prepended* to the
explicit arguments you give it on the command line. Since flags given later
override flags given earlier, everything works as expected. This works for most
other flags as well, and each flag's documentation states which other flags
override it.
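
For example, assuming the configuration file above is in use (the pattern
`needle` is just a placeholder):

```
$ RIPGREP_CONFIG_PATH=$HOME/.ripgreprc rg needle      # long lines truncated at 150 columns
$ RIPGREP_CONFIG_PATH=$HOME/.ripgreprc rg -M0 needle  # -M0 overrides the config; full lines shown
```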

If you're confused about what configuration file ripgrep is reading arguments
from, then running ripgrep with the `--debug` flag should help clarify things.
The debug output should note what config file is being loaded and the arguments
that have been read from the configuration.

Finally, if you want to make absolutely sure that ripgrep *isn't* reading a
configuration file, then you can pass the `--no-config` flag, which will always
prevent ripgrep from reading extraneous configuration from the environment,
regardless of what other methods of configuration are added to ripgrep in the
future.


### File encoding

[Text encoding](https://en.wikipedia.org/wiki/Character_encoding) is a complex
topic, but we can try to summarize its relevance to ripgrep:

* Files are generally just a bundle of bytes. There is no reliable way to know
  their encoding.
* Either the encoding of the pattern must match the encoding of the files being
  searched, or a form of transcoding must be performed that converts either the
  pattern or the file to the same encoding as the other.
* ripgrep tends to work best on plain text files, and among plain text files,
  the most popular encodings likely consist of ASCII, latin1 or UTF-8. As
  a special exception, UTF-16 is prevalent in Windows environments.

In light of the above, here is how ripgrep behaves:

* All input is assumed to be ASCII compatible (which means every byte that
  corresponds to an ASCII codepoint actually is an ASCII codepoint). This
  includes ASCII itself, latin1 and UTF-8.
* ripgrep works best with UTF-8. For example, ripgrep's regular expression
  engine supports Unicode features. Namely, character classes like `\w` will
  match all word characters by Unicode's definition and `.` will match any
  Unicode codepoint instead of any byte. These constructions assume UTF-8,
  so they simply won't match when they come across bytes in a file that aren't
  UTF-8.
* To handle the UTF-16 case, ripgrep will do something called "BOM sniffing"
  by default. That is, the first three bytes of a file will be read, and if
  they correspond to a UTF-16 BOM, then ripgrep will transcode the contents of
  the file from UTF-16 to UTF-8, and then execute the search on the transcoded
  version of the file. (This incurs a performance penalty since transcoding
  is slower than regex searching.)
* To handle other cases, ripgrep provides a `-E/--encoding` flag, which permits
  you to specify an encoding from the
  [Encoding Standard](https://encoding.spec.whatwg.org/#concept-encoding-get).
  ripgrep will assume *all* files searched are in the specified encoding and
  will perform a transcoding step just like in the UTF-16 case described above,
  as sketched after this list.
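
For instance, here is a minimal sketch of searching files known to be
Shift_JIS encoded (the pattern and file name are hypothetical):

```
$ rg -E shift_jis 'エラー' logs/legacy-app.log
```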

By default, ripgrep will not require its input be valid UTF-8. That is, ripgrep
can and will search arbitrary bytes. The key here is that if you're searching
content that isn't UTF-8, then the usefulness of your pattern will degrade. If
you're searching bytes that aren't ASCII compatible, then it's likely the
pattern won't find anything. With all that said, this mode of operation is
important, because it lets you find ASCII or UTF-8 *within* files that are
otherwise arbitrary bytes.

Finally, it is possible to disable ripgrep's Unicode support from within the
pattern regular expression. For example, let's say you wanted `.` to match any
byte rather than any Unicode codepoint. (You might want this while searching a
binary file, since `.` by default will not match invalid UTF-8.) You could do
this by disabling Unicode via a regular expression flag:

```
$ rg '(?-u:.)'
```

This works for any part of the pattern. For example, the following will find
any Unicode word character followed by any ASCII word character followed by
another Unicode word character:

```
$ rg '\w(?-u:\w)\w'
```


### Common options

ripgrep has a lot of flags. Too many to keep in your head at once. This section
is intended to give you a sampling of some of the most important and frequently
used options that will likely impact how you use ripgrep on a regular basis. (A
short example combining a few of these flags follows the list.)

* `-h`: Show ripgrep's condensed help output.
* `--help`: Show ripgrep's longer form help output. (Nearly what you'd find in
  ripgrep's man page, so pipe it into a pager!)
* `-i/--ignore-case`: When searching for a pattern, ignore case differences.
  That is, `rg -i fast` matches `fast`, `fASt`, `FAST`, etc.
* `-S/--smart-case`: This is similar to `--ignore-case`, but disables itself
  if the pattern contains any uppercase letters. Usually this flag is put into
  an alias or a config file.
* `-w/--word-regexp`: Require that all matches of the pattern be surrounded
  by word boundaries. That is, given `pattern`, the `--word-regexp` flag will
  cause ripgrep to behave as if `pattern` were actually `\b(?:pattern)\b`.
* `-c/--count`: Report a count of total matched lines.
* `--files`: Print the files that ripgrep *would* search, but don't actually
  search them.
* `-a/--text`: Search binary files as if they were plain text.
* `-z/--search-zip`: Search compressed files (gzip, bzip2, lzma, xz). This is
  disabled by default.
* `-C/--context`: Show the lines surrounding a match.
* `--sort-files`: Force ripgrep to sort its output by file name. (This disables
  parallelism, so it might be slower.)
* `-L/--follow`: Follow symbolic links while recursively searching.
* `-M/--max-columns`: Limit the length of lines printed by ripgrep.
* `--debug`: Shows ripgrep's debug output. This is useful for understanding
  why a particular file might be ignored from search, or what kinds of
  configuration ripgrep is loading from the environment.
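
As a closing sketch (the pattern and flag combination are illustrative, not
taken from the guide's transcripts), several of these flags compose naturally
on a single command line:

```
# Case-insensitive, whole-word search of Rust files, with 2 lines of context.
$ rg -i -w -C2 -trust 'fn run'

# List the Rust files that would be searched, without actually searching them.
$ rg --files -trust
```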

ISSUE_TEMPLATE.md (new file, 47 lines)
@@ -0,0 +1,47 @@

#### What version of ripgrep are you using?

Replace this text with the output of `rg --version`.

#### What operating system are you using ripgrep on?

Replace this text with your operating system and version.

#### Describe your question, feature request, or bug.

If a question, please describe the problem you're trying to solve and give
as much context as possible.

If a feature request, please describe the behavior you want and the motivation.
Please also provide an example of how ripgrep would be used if your feature
request were added.

If a bug, please see below.

#### If this is a bug, what are the steps to reproduce the behavior?

If possible, please include both your search patterns and the corpus on which
you are searching. Unless the bug is very obvious, it is unlikely to be fixed
if the ripgrep maintainers cannot reproduce it.

If the corpus is too big and you cannot decrease its size, file the bug anyway
and the ripgrep maintainers will help figure out next steps.

#### If this is a bug, what is the actual behavior?

Show the command you ran and the actual output. Include the `--debug` flag in
your invocation of ripgrep.

If the output is large, put it in a gist: https://gist.github.com/

If the output is small, put it in code fences:

```
your
output
goes
here
```

#### If this is a bug, what is the expected behavior?

What do you think ripgrep should have done?

README.md (384 lines changed)
@@ -1,11 +1,11 @@
|
||||
ripgrep (rg)
|
||||
------------
|
||||
`ripgrep` is a line oriented search tool that combines the usability of The
|
||||
Silver Searcher (similar to `ack`) with the raw speed of GNU grep. `ripgrep`
|
||||
works by recursively searching your current directory for a regex pattern.
|
||||
`ripgrep` has first class support on Windows, Mac and Linux, with binary
|
||||
downloads available for
|
||||
[every release](https://github.com/BurntSushi/ripgrep/releases).
|
||||
ripgrep is a line-oriented search tool that recursively searches your current
|
||||
directory for a regex pattern while respecting your gitignore rules. ripgrep
|
||||
has first class support on Windows, macOS and Linux, with binary downloads
|
||||
available for [every release](https://github.com/BurntSushi/ripgrep/releases).
|
||||
ripgrep is similar to other popular search tools like The Silver Searcher,
|
||||
ack and grep.
|
||||
|
||||
[](https://travis-ci.org/BurntSushi/ripgrep)
|
||||
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
|
||||
@@ -13,115 +13,131 @@ downloads available for
|
||||
|
||||
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
|
||||
|
||||
|
||||
### CHANGELOG
|
||||
|
||||
Please see the [CHANGELOG](CHANGELOG.md) for a release history.
|
||||
|
||||
### Documentation quick links
|
||||
|
||||
* [Installation](#installation)
|
||||
* [User Guide](GUIDE.md)
|
||||
* [Frequently Asked Questions](FAQ.md)
|
||||
* [Regex syntax](https://docs.rs/regex/0.2.5/regex/#syntax)
|
||||
* [Configuration files](GUIDE.md#configuration-file)
|
||||
* [Shell completions](FAQ.md#complete)
|
||||
* [Building](#building)
|
||||
|
||||
|
||||
### Screenshot of search results
|
||||
|
||||
[](http://burntsushi.net/stuff/ripgrep1.png)
|
||||
|
||||
|
||||
### Quick examples comparing tools
|
||||
|
||||
This example searches the entire Linux kernel source tree (after running
|
||||
`make defconfig && make -j8`) for `[A-Z]+_SUSPEND`, where all matches must be
|
||||
words. Timings were collected on a system with an Intel i7-6900K 3.2 GHz, and
|
||||
ripgrep was compiled using the `compile` script in this repo.
|
||||
ripgrep was compiled with SIMD enabled.
|
||||
|
||||
Please remember that a single benchmark is never enough! See my
|
||||
[blog post on `ripgrep`](http://blog.burntsushi.net/ripgrep/)
|
||||
[blog post on ripgrep](http://blog.burntsushi.net/ripgrep/)
|
||||
for a very detailed comparison with more benchmarks and analysis.
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep (Unicode) | `rg -n -w '[A-Z]+_SUSPEND'` | 450 | **0.134s** |
|
||||
| [The Silver Searcher](https://github.com/ggreer/the_silver_searcher) | `ag -w '[A-Z]+_SUSPEND'` | 450 | 0.753s |
|
||||
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=C git grep -E -n -w '[A-Z]+_SUSPEND'` | 450 | 0.823s |
|
||||
| [git grep (Unicode)](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=en_US.UTF-8 git grep -E -n -w '[A-Z]+_SUSPEND'` | 450 | 2.880s |
|
||||
| [sift](https://github.com/svent/sift) | `sift --git -n -w '[A-Z]+_SUSPEND'` | 450 | 3.656s |
|
||||
| [The Platinum Searcher](https://github.com/monochromegane/the_platinum_searcher) | `pt -w -e '[A-Z]+_SUSPEND'` | 450 | 12.369s |
|
||||
| [ack](https://github.com/petdance/ack2) | `ack -w '[A-Z]+_SUSPEND'` | 1878 | 16.952s |
|
||||
| ripgrep (Unicode) | `rg -n -w '[A-Z]+_SUSPEND'` | 450 | **0.106s** |
|
||||
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=C git grep -E -n -w '[A-Z]+_SUSPEND'` | 450 | 0.553s |
|
||||
| [The Silver Searcher](https://github.com/ggreer/the_silver_searcher) | `ag -w '[A-Z]+_SUSPEND'` | 450 | 0.589s |
|
||||
| [git grep (Unicode)](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=en_US.UTF-8 git grep -E -n -w '[A-Z]+_SUSPEND'` | 450 | 2.266s |
|
||||
| [sift](https://github.com/svent/sift) | `sift --git -n -w '[A-Z]+_SUSPEND'` | 450 | 3.505s |
|
||||
| [ack](https://github.com/petdance/ack2) | `ack -w '[A-Z]+_SUSPEND'` | 1878 | 6.823s |
|
||||
| [The Platinum Searcher](https://github.com/monochromegane/the_platinum_searcher) | `pt -w -e '[A-Z]+_SUSPEND'` | 450 | 14.208s |
|
||||
|
||||
(Yes, `ack` [has](https://github.com/petdance/ack2/issues/445) a
|
||||
[bug](https://github.com/petdance/ack2/issues/14).)
|
||||
|
||||
Here's another benchmark that disregards gitignore files and searches with a
|
||||
whitelist instead. The corpus is the same as in the previous benchmark, and the
|
||||
flags passed to each command ensures that they are doing equivalent work:
|
||||
flags passed to each command ensure that they are doing equivalent work:
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep | `rg -L -u -tc -n -w '[A-Z]+_SUSPEND'` | 404 | **0.108s** |
|
||||
| [ucg](https://github.com/gvansickle/ucg) | `ucg --type=cc -w '[A-Z]+_SUSPEND'` | 392 | 0.219s |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `egrep -R -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 404 | 0.733s |
|
||||
| ripgrep | `rg -L -u -tc -n -w '[A-Z]+_SUSPEND'` | 404 | **0.079s** |
|
||||
| [ucg](https://github.com/gvansickle/ucg) | `ucg --type=cc -w '[A-Z]+_SUSPEND'` | 390 | 0.163s |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `egrep -R -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 404 | 0.611s |
|
||||
|
||||
(`ucg` [has slightly different behavior in the presence of symbolic links](https://github.com/gvansickle/ucg/issues/106).)
|
||||
|
||||
And finally, a straight up comparison between ripgrep and GNU grep on a single
|
||||
And finally, a straight-up comparison between ripgrep and GNU grep on a single
|
||||
large file (~9.3GB,
|
||||
[`OpenSubtitles2016.raw.en.gz`](http://opus.lingfil.uu.se/OpenSubtitles2016/mono/OpenSubtitles2016.raw.en.gz)):
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep | `rg -w 'Sherlock [A-Z]\w+'` | 5268 | **2.520s** |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `LC_ALL=C egrep -w 'Sherlock [A-Z]\w+'` | 5268 | 7.143s |
|
||||
| ripgrep | `rg -w 'Sherlock [A-Z]\w+'` | 5268 | **2.108s** |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `LC_ALL=C egrep -w 'Sherlock [A-Z]\w+'` | 5268 | 7.014s |
|
||||
|
||||
In the above benchmark, passing the `-n` flag (for showing line numbers)
|
||||
increases the times to `3.081s` for ripgrep and `11.403s` for GNU grep.
|
||||
increases the times to `2.640s` for ripgrep and `10.277s` for GNU grep.
|
||||
|
||||
### Why should I use `ripgrep`?
|
||||
|
||||
* It can replace both The Silver Searcher and GNU grep because it is faster
|
||||
than both. (N.B. It is not, strictly speaking, a "drop-in" replacement for
|
||||
both, but the feature sets are far more similar than different.)
|
||||
* Like The Silver Searcher, `ripgrep` defaults to recursive directory search
|
||||
### Why should I use ripgrep?
|
||||
|
||||
* It can replace many use cases served by both The Silver Searcher and GNU grep
|
||||
because it is generally faster than both. (See [the FAQ](FAQ.md#posix4ever)
|
||||
for more details on whether ripgrep can truly replace grep.)
|
||||
* Like The Silver Searcher, ripgrep defaults to recursive directory search
|
||||
and won't search files ignored by your `.gitignore` files. It also ignores
|
||||
hidden and binary files by default. `ripgrep` also implements full support
|
||||
for `.gitignore`, where as there are many bugs related to that functionality
|
||||
hidden and binary files by default. ripgrep also implements full support
|
||||
for `.gitignore`, whereas there are many bugs related to that functionality
|
||||
in The Silver Searcher.
|
||||
* `ripgrep` can search specific types of files. For example, `rg -tpy foo`
|
||||
* ripgrep can search specific types of files. For example, `rg -tpy foo`
|
||||
limits your search to Python files and `rg -Tjs foo` excludes Javascript
|
||||
files from your search. `ripgrep` can be taught about new file types with
|
||||
files from your search. ripgrep can be taught about new file types with
|
||||
custom matching rules.
|
||||
* `ripgrep` supports many features found in `grep`, such as showing the context
|
||||
* ripgrep supports many features found in `grep`, such as showing the context
|
||||
of search results, searching multiple patterns, highlighting matches with
|
||||
color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast while
|
||||
color and full Unicode support. Unlike GNU grep, ripgrep stays fast while
|
||||
supporting Unicode (which is always on).
|
||||
* `ripgrep` supports searching files in text encodings other than UTF-8, such
|
||||
* ripgrep supports searching files in text encodings other than UTF-8, such
|
||||
as UTF-16, latin-1, GBK, EUC-JP, Shift_JIS and more. (Some support for
|
||||
automatically detecting UTF-16 is provided. Other text encodings must be
|
||||
specifically specified with the `-E/--encoding` flag.)
|
||||
* ripgrep supports searching files compressed in a common format (gzip, xz,
|
||||
lzma or bzip2 currently) with the `-z/--search-zip` flag.
|
||||
|
||||
In other words, use `ripgrep` if you like speed, filtering by default, fewer
|
||||
bugs and Unicode support.
|
||||
In other words, use ripgrep if you like speed, filtering by default, fewer
|
||||
bugs, and Unicode support.
|
||||
|
||||
### Why shouldn't I use `ripgrep`?
|
||||
|
||||
I'd like to try to convince you why you *shouldn't* use `ripgrep`. This should
|
||||
### Why shouldn't I use ripgrep?
|
||||
|
||||
I'd like to try to convince you why you *shouldn't* use ripgrep. This should
|
||||
give you a glimpse at some important downsides or missing features of
|
||||
`ripgrep`.
|
||||
ripgrep.
|
||||
|
||||
* `ripgrep` uses a regex engine based on finite automata, so if you want fancy
|
||||
regex features such as backreferences or look around, `ripgrep` won't give
|
||||
them to you. `ripgrep` does support lots of things though, including, but not
|
||||
* ripgrep uses a regex engine based on finite automata, so if you want fancy
|
||||
regex features such as backreferences or lookaround, ripgrep won't provide
|
||||
them to you. ripgrep does support lots of things though, including, but not
|
||||
limited to: lazy quantification (e.g., `a+?`), repetitions (e.g., `a{2,5}`),
|
||||
begin/end assertions (e.g., `^\w+$`), word boundaries (e.g., `\bfoo\b`), and
|
||||
support for Unicode categories (e.g., `\p{Sc}` to match currency symbols or
|
||||
`\p{Lu}` to match any uppercase letter). (Fancier regexes will never be
|
||||
supported.)
|
||||
* `ripgrep` doesn't yet support searching compressed files. (Likely to be
|
||||
supported in the future.)
|
||||
* `ripgrep` doesn't have multiline search. (Unlikely to ever be supported.)
|
||||
* ripgrep doesn't have multiline search. (Will happen as an opt-in feature.)
|
||||
|
||||
In other words, if you like fancy regexes or multiline search, then ripgrep
|
||||
may not quite meet your needs (yet).
|
||||
|
||||
In other words, if you like fancy regexes, searching compressed files or
|
||||
multiline search, then `ripgrep` may not quite meet your needs (yet).
|
||||
|
||||
### Is it really faster than everything else?
|
||||
|
||||
Yes. A large number of benchmarks with detailed analysis for each is
|
||||
Generally, yes. A large number of benchmarks with detailed analysis for each is
|
||||
[available on my blog](http://blog.burntsushi.net/ripgrep/).
|
||||
|
||||
Summarizing, `ripgrep` is fast because:
|
||||
Summarizing, ripgrep is fast because:
|
||||
|
||||
* It is built on top of
|
||||
[Rust's regex engine](https://github.com/rust-lang-nursery/regex).
|
||||
@@ -132,7 +148,7 @@ Summarizing, `ripgrep` is fast because:
|
||||
engine.
|
||||
* It supports searching with either memory maps or by searching incrementally
|
||||
with an intermediate buffer. The former is better for single files and the
|
||||
latter is better for large directories. `ripgrep` chooses the best searching
|
||||
latter is better for large directories. ripgrep chooses the best searching
|
||||
strategy for you automatically.
|
||||
* Applies your ignore patterns in `.gitignore` files using a
|
||||
[`RegexSet`](https://doc.rust-lang.org/regex/regex/struct.RegexSet.html).
|
||||
@@ -142,19 +158,31 @@ Summarizing, `ripgrep` is fast because:
|
||||
[`crossbeam`](https://docs.rs/crossbeam) and
|
||||
[`ignore`](https://docs.rs/ignore).
|
||||
|
||||
|
||||
### Feature comparison
|
||||
|
||||
Andy Lester, author of [ack](https://beyondgrep.com/), has published an
|
||||
excellent table comparing the features of ack, ag, git-grep, GNU grep and
|
||||
ripgrep: https://beyondgrep.com/feature-comparison/
|
||||
|
||||
|
||||
### Installation
|
||||
|
||||
The binary name for `ripgrep` is `rg`.
|
||||
The binary name for ripgrep is `rg`.
|
||||
|
||||
[Binaries for `ripgrep` are available for Windows, Mac and
|
||||
Linux.](https://github.com/BurntSushi/ripgrep/releases) Linux binaries are
|
||||
static executables. Windows binaries are available either as built with MinGW
|
||||
(GNU) or with Microsoft Visual C++ (MSVC). When possible, prefer MSVC over GNU,
|
||||
but you'll need to have the
|
||||
[Microsoft VC++ 2015 redistributable](https://www.microsoft.com/en-us/download/details.aspx?id=48145)
|
||||
**[Archives of precompiled binaries for ripgrep are available for Windows,
|
||||
macOS and Linux.](https://github.com/BurntSushi/ripgrep/releases)** Users of
|
||||
platforms not explicitly mentioned below are advised to download one of these
|
||||
archives.
|
||||
|
||||
Linux binaries are static executables. Windows binaries are available either as
|
||||
built with MinGW (GNU) or with Microsoft Visual C++ (MSVC). When possible,
|
||||
prefer MSVC over GNU, but you'll need to have the [Microsoft VC++ 2015
|
||||
redistributable](https://www.microsoft.com/en-us/download/details.aspx?id=48145)
|
||||
installed.
|
||||
|
||||
If you're a **Mac OS X Homebrew** user, then you can install ripgrep either
|
||||
If you're a **macOS Homebrew** or a **Linuxbrew** user,
|
||||
then you can install ripgrep either
|
||||
from homebrew-core, (compiled with rust stable, no SIMD):
|
||||
|
||||
```
|
||||
@@ -166,189 +194,117 @@ optimizations) by utilizing a custom tap:
|
||||
|
||||
```
|
||||
$ brew tap burntsushi/ripgrep https://github.com/BurntSushi/ripgrep.git
|
||||
$ brew install burntsushi/ripgrep/ripgrep-bin
|
||||
$ brew install ripgrep-bin
|
||||
```
|
||||
|
||||
If you're a **Windows Chocolatey** user, then you can install `ripgrep` from the [official repo](https://chocolatey.org/packages/ripgrep):
|
||||
If you're a **Windows Chocolatey** user, then you can install ripgrep from the [official repo](https://chocolatey.org/packages/ripgrep):
|
||||
|
||||
```
|
||||
$ choco install ripgrep
|
||||
```
|
||||
|
||||
If you're an **Arch Linux** user, then you can install `ripgrep` from the official repos:
|
||||
If you're a **Windows Scoop** user, then you can install ripgrep from the [official bucket](https://github.com/lukesampson/scoop/blob/master/bucket/ripgrep.json):
|
||||
|
||||
```
|
||||
$ scoop install ripgrep
|
||||
```
|
||||
|
||||
If you're an **Arch Linux** user, then you can install ripgrep from the official repos:
|
||||
|
||||
```
|
||||
$ pacman -S ripgrep
|
||||
```
|
||||
|
||||
If you're a **Gentoo** user, you can install `ripgrep` from the [official repo](https://packages.gentoo.org/packages/sys-apps/ripgrep):
|
||||
If you're a **Gentoo** user, you can install ripgrep from the [official repo](https://packages.gentoo.org/packages/sys-apps/ripgrep):
|
||||
|
||||
```
|
||||
$ emerge ripgrep
|
||||
$ emerge sys-apps/ripgrep
|
||||
```
|
||||
|
||||
If you're a **Fedora 24+** user, you can install `ripgrep` from [copr](https://copr.fedorainfracloud.org/coprs/carlgeorge/ripgrep/):
|
||||
If you're a **Fedora 27+** user, you can install ripgrep from official repositories.
|
||||
|
||||
```
|
||||
$ dnf copr enable carlgeorge/ripgrep
|
||||
$ dnf install ripgrep
|
||||
$ sudo dnf install ripgrep
|
||||
```
|
||||
|
||||
If you're a **RHEL/CentOS 7** user, you can install `ripgrep` from [copr](https://copr.fedorainfracloud.org/coprs/carlgeorge/ripgrep/):
|
||||
If you're a **Fedora 24+** user, you can install ripgrep from [copr](https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/):
|
||||
|
||||
```
|
||||
$ yum-config-manager --add-repo=https://copr.fedorainfracloud.org/coprs/carlgeorge/ripgrep/repo/epel-7/carlgeorge-ripgrep-epel-7.repo
|
||||
$ yum install ripgrep
|
||||
$ sudo dnf copr enable carlwgeorge/ripgrep
|
||||
$ sudo dnf install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Nix** user, you can install `ripgrep` from
|
||||
If you're an **openSUSE Tumbleweed** user, you can install ripgrep from the [official repo](http://software.opensuse.org/package/ripgrep):
|
||||
|
||||
```
|
||||
$ sudo zypper install ripgrep
|
||||
```
|
||||
|
||||
If you're a **RHEL/CentOS 7** user, you can install ripgrep from [copr](https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/):
|
||||
|
||||
```
|
||||
$ sudo yum-config-manager --add-repo=https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/repo/epel-7/carlwgeorge-ripgrep-epel-7.repo
|
||||
$ sudo yum install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Nix** user, you can install ripgrep from
|
||||
[nixpkgs](https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/text/ripgrep/default.nix):
|
||||
|
||||
```
|
||||
$ nix-env --install ripgrep
|
||||
$ # (Or using the attribute name, which is also `ripgrep`.)
|
||||
$ # (Or using the attribute name, which is also ripgrep.)
|
||||
```
|
||||
|
||||
If you're a **Rust programmer**, `ripgrep` can be installed with `cargo`. Note
|
||||
that this requires you to have **Rust 1.12 or newer** installed.
|
||||
If you're a **Debian** user (or a user of a Debian derivative like **Ubuntu**),
|
||||
then ripgrep can be installed using a binary `.deb` file provided in each
|
||||
[ripgrep release](https://github.com/BurntSushi/ripgrep/releases). Note that
|
||||
ripgrep is not in the official Debian or Ubuntu repositories.
|
||||
|
||||
```
|
||||
$ curl -LO https://github.com/BurntSushi/ripgrep/releases/download/0.8.1/ripgrep_0.8.1_amd64.deb
|
||||
$ sudo dpkg -i ripgrep_0.8.1_amd64.deb
|
||||
```
|
||||
|
||||
If you're an **Ubuntu** user, ripgrep can be installed from the `snap` store.
|
||||
|
||||
* Note that if you are using `16.04 LTS` or later, snap is already installed.
|
||||
* For older versions you can install snap using
|
||||
[this guide](https://docs.snapcraft.io/core/install-ubuntu).
|
||||
|
||||
For the latest stable release:
|
||||
|
||||
```
|
||||
$ sudo snap install --classic rg
|
||||
```
|
||||
|
||||
If you're a **Rust programmer**, ripgrep can be installed with `cargo`.
|
||||
* Note that the minimum supported version of Rust for ripgrep is **1.20**,
|
||||
although ripgrep may work with older versions.
|
||||
* Note that the binary may be bigger than expected because it contains debug
|
||||
symbols. This is intentional. To remove debug symbols and therefore reduce
|
||||
the file size, run `strip` on the binary.
|
||||
|
||||
```
|
||||
$ cargo install ripgrep
|
||||
```
|
||||
|
||||
`ripgrep` isn't currently in any other package repositories.
|
||||
If you're using Rust nightly, then use
|
||||
|
||||
```
|
||||
$ cargo install ripgrep --features unstable
|
||||
```
|
||||
|
||||
to get SIMD optimizations.
|
||||
|
||||
ripgrep isn't currently in any other package repositories.
|
||||
[I'd like to change that](https://github.com/BurntSushi/ripgrep/issues/10).
|
||||
|
||||
### Whirlwind tour
|
||||
|
||||
The command line usage of `ripgrep` doesn't differ much from other tools that
|
||||
perform a similar function, so you probably already know how to use `ripgrep`.
|
||||
The full details can be found in `rg --help`, but let's go on a whirlwind tour.
|
||||
|
||||
`ripgrep` detects when it's printing to a terminal, and will automatically
|
||||
colorize your output and show line numbers, just like The Silver Searcher.
|
||||
Coloring works on Windows too! Colors can be controlled more granularly with
|
||||
the `--color` flag.
|
||||
|
||||
One last thing before we get started: `ripgrep` assumes UTF-8 *everywhere*. It
|
||||
can still search files that are invalid UTF-8 (like, say, latin-1), but it will
|
||||
simply not work on UTF-16 encoded files or other more exotic encodings.
|
||||
[Support for other encodings may
|
||||
happen.](https://github.com/BurntSushi/ripgrep/issues/1)
|
||||
|
||||
To recursively search the current directory, while respecting all `.gitignore`
|
||||
files, ignore hidden files and directories and skip binary files:
|
||||
|
||||
```
|
||||
$ rg foobar
|
||||
```
|
||||
|
||||
The above command also respects all `.ignore` files, including in parent
|
||||
directories. `.ignore` files can be used when `.gitignore` files are
|
||||
insufficient. In all cases, `.ignore` patterns take precedence over
|
||||
`.gitignore`.
|
||||
|
||||
To ignore all ignore files, use `-u`. To additionally search hidden files
|
||||
and directories, use `-uu`. To additionally search binary files, use `-uuu`.
|
||||
(In other words, "search everything, dammit!") In particular, `rg -uuu` is
|
||||
similar to `grep -a -r`.
|
||||
|
||||
```
|
||||
$ rg -uu foobar # similar to `grep -r`
|
||||
$ rg -uuu foobar # similar to `grep -a -r`
|
||||
```
|
||||
|
||||
(Tip: If your ignore files aren't being adhered to like you expect, run your
|
||||
search with the `--debug` flag.)
|
||||
|
||||
Make the search case insensitive with `-i`, invert the search with `-v` or
|
||||
show the 2 lines before and after every search result with `-C2`.
|
||||
|
||||
Force all matches to be surrounded by word boundaries with `-w`.
|
||||
|
||||
Search and replace (find first and last names and swap them):
|
||||
|
||||
```
|
||||
$ rg '([A-Z][a-z]+)\s+([A-Z][a-z]+)' --replace '$2, $1'
|
||||
```
|
||||
|
||||
Named groups are supported:
|
||||
|
||||
```
|
||||
$ rg '(?P<first>[A-Z][a-z]+)\s+(?P<last>[A-Z][a-z]+)' --replace '$last, $first'
|
||||
```
|
||||
|
||||
Up the ante with full Unicode support, by matching any uppercase Unicode letter
|
||||
followed by any sequence of lowercase Unicode letters (good luck doing this
|
||||
with other search tools!):
|
||||
|
||||
```
|
||||
$ rg '(\p{Lu}\p{Ll}+)\s+(\p{Lu}\p{Ll}+)' --replace '$2, $1'
|
||||
```
|
||||
|
||||
Search only files matching a particular glob:
|
||||
|
||||
```
|
||||
$ rg foo -g 'README.*'
|
||||
```
|
||||
|
||||
<!--*-->
|
||||
|
||||
Or exclude files matching a particular glob:
|
||||
|
||||
```
|
||||
$ rg foo -g '!*.min.js'
|
||||
```
|
||||
|
||||
Search and return paths matching a particular glob (i.e., `-g` flag in ag/ack):
|
||||
|
||||
```
|
||||
$ rg -g 'doc*' --files
|
||||
```
|
||||
|
||||
Search only HTML and CSS files:
|
||||
|
||||
```
|
||||
$ rg -thtml -tcss foobar
|
||||
```
|
||||
|
||||
Search everything except for Javascript files:
|
||||
|
||||
```
|
||||
$ rg -Tjs foobar
|
||||
```
|
||||
|
||||
To see a list of types supported, run `rg --type-list`. To add a new type, use
|
||||
`--type-add`, which must be accompanied by a pattern for searching (`rg` won't
|
||||
persist your type settings):
|
||||
|
||||
```
|
||||
$ rg --type-add 'foo:*.{foo,foobar}' -tfoo bar
|
||||
```
|
||||
|
||||
The type `foo` will now match any file ending with the `.foo` or `.foobar`
|
||||
extensions.
|
||||
|
||||
### Regex syntax
|
||||
|
||||
The syntax supported is
|
||||
[documented as part of Rust's regex library](https://doc.rust-lang.org/regex/regex/index.html#syntax).
|
||||
|
||||
### Shell completions
|
||||
|
||||
Shell completion files are included in the release tarball for Bash, Fish, Zsh
|
||||
and PowerShell.
|
||||
|
||||
For **bash**, move `rg.bash-completion` to `$XDG_CONFIG_HOME/bash_completion`
|
||||
or `/etc/bash_completion.d/`.
|
||||
|
||||
For **fish**, move `rg.fish` to `$HOME/.config/fish/completions`.
|
||||
|
||||
### Building
|
||||
|
||||
`ripgrep` is written in Rust, so you'll need to grab a
|
||||
ripgrep is written in Rust, so you'll need to grab a
|
||||
[Rust installation](https://www.rust-lang.org/) in order to compile it.
|
||||
`ripgrep` compiles with Rust 1.12 (stable) or newer. Building is easy:
|
||||
ripgrep compiles with Rust 1.20 (stable) or newer. Building is easy:
|
||||
|
||||
```
|
||||
$ git clone https://github.com/BurntSushi/ripgrep
|
||||
@@ -358,37 +314,25 @@ $ ./target/release/rg --version
|
||||
0.1.3
|
||||
```
|
||||
|
||||
If you have a Rust nightly compiler, then you can enable optional SIMD
|
||||
acceleration like so:
|
||||
If you have a Rust nightly compiler and a recent Intel CPU, then you can enable
|
||||
optional SIMD acceleration like so:
|
||||
|
||||
```
|
||||
RUSTFLAGS="-C target-cpu=native" cargo build --release --features 'simd-accel avx-accel'
|
||||
```
|
||||
|
||||
If your machine doesn't support AVX instructions, then simply remove
|
||||
`avx-accel` from the features list. Similarly for SIMD.
|
||||
`avx-accel` from the features list. Similarly for SIMD (which corresponds
|
||||
roughly to SSE instructions).
|
||||
|
||||
|
||||
### Running tests
|
||||
|
||||
`ripgrep` is relatively well tested, including both unit tests and integration
|
||||
ripgrep is relatively well-tested, including both unit tests and integration
|
||||
tests. To run the full test suite, use:
|
||||
|
||||
```
|
||||
$ cargo test
|
||||
$ cargo test --all
|
||||
```
|
||||
|
||||
from the repository root.
|
||||
|
||||
### Known issues
|
||||
|
||||
#### I just hit Ctrl+C in the middle of ripgrep's output and now my terminal's foreground color is wrong!
|
||||
|
||||
Type in `color` on Windows and `echo -ne "\033[0m"` on Unix to restore your
|
||||
original foreground color.
|
||||
|
||||
PR [#187](https://github.com/BurntSushi/ripgrep/pull/187) fixed this, and it
|
||||
was later deprecated in
|
||||
[#281](https://github.com/BurntSushi/ripgrep/issues/281). A full explanation is
|
||||
available [here][msys issue explanation].
|
||||
|
||||
[msys issue explanation]: https://github.com/BurntSushi/ripgrep/issues/281#issuecomment-269093893
|
||||
|
||||
appveyor.yml (34 lines changed)
@@ -1,6 +1,23 @@
|
||||
# Inspired from https://github.com/habitat-sh/habitat/blob/master/appveyor.yml
|
||||
cache:
|
||||
- c:\cargo\registry
|
||||
- c:\cargo\git
|
||||
- c:\projects\ripgrep\target
|
||||
|
||||
init:
|
||||
- mkdir c:\cargo
|
||||
- mkdir c:\rustup
|
||||
- SET PATH=c:\cargo\bin;%PATH%
|
||||
|
||||
clone_folder: c:\projects\ripgrep
|
||||
|
||||
environment:
|
||||
CARGO_HOME: "c:\\cargo"
|
||||
RUSTUP_HOME: "c:\\rustup"
|
||||
CARGO_TARGET_DIR: "c:\\projects\\ripgrep\\target"
|
||||
global:
|
||||
PROJECT_NAME: ripgrep
|
||||
RUST_BACKTRACE: full
|
||||
matrix:
|
||||
- TARGET: i686-pc-windows-gnu
|
||||
CHANNEL: stable
|
||||
@@ -11,12 +28,14 @@ environment:
|
||||
- TARGET: x86_64-pc-windows-msvc
|
||||
CHANNEL: stable
|
||||
|
||||
matrix:
|
||||
fast_finish: true
|
||||
|
||||
# Install Rust and Cargo
|
||||
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
|
||||
install:
|
||||
- curl -sSf -o rustup-init.exe https://win.rustup.rs/
|
||||
- rustup-init.exe -y --default-host %TARGET%
|
||||
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
|
||||
- rustup-init.exe -y --default-host %TARGET% --no-modify-path
|
||||
- if defined MSYS2_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS2_BITS%\bin
|
||||
- rustc -V
|
||||
- cargo -V
|
||||
@@ -27,19 +46,14 @@ build: false
|
||||
# Equivalent to Travis' `script` phase
|
||||
# TODO modify this phase as you see fit
|
||||
test_script:
|
||||
- cargo test --verbose
|
||||
- cargo test --verbose --manifest-path grep/Cargo.toml
|
||||
- cargo test --verbose --manifest-path globset/Cargo.toml
|
||||
- cargo test --verbose --manifest-path ignore/Cargo.toml
|
||||
- cargo test --verbose --manifest-path wincolor/Cargo.toml
|
||||
- cargo test --verbose --manifest-path termcolor/Cargo.toml
|
||||
- cargo test --verbose --all
|
||||
|
||||
before_deploy:
|
||||
# Generate artifacts for release
|
||||
# TODO(burntsushi): How can we enable SSSE3 on Windows?
|
||||
- cargo build --release
|
||||
- cargo build --release --features unstable
|
||||
- mkdir staging
|
||||
- copy target\release\rg.exe staging
|
||||
- ps: copy target\release\build\ripgrep-*\out\_rg.ps1 staging
|
||||
- cd staging
|
||||
# release zipfile will look like 'rust-everywhere-v1.2.3-x86_64-pc-windows-msvc'
|
||||
- 7z a ../%PROJECT_NAME%-%APPVEYOR_REPO_TAG_NAME%-%TARGET%.zip *
|
||||
|
||||
@@ -1082,7 +1082,7 @@ def download_subtitles_en(suite_dir):
|
||||
if not os.path.exists(en_path):
|
||||
if not os.path.exists(en_path_gz):
|
||||
run_cmd(['curl', '-LO', SUBTITLES_EN_URL], cwd=subtitle_dir)
|
||||
run_cmd(['gunzip', en_path_gz], cwd=subtitle_dir)
|
||||
run_cmd(['gunzip', en_path_gz])
|
||||
if not os.path.exists(en_path_sample):
|
||||
# Get a sample roughly the same size as the Russian corpus so that
|
||||
# benchmarks finish in a reasonable time.
|
||||
@@ -1109,7 +1109,7 @@ def download_subtitles_ru(suite_dir):
|
||||
if not os.path.exists(ru_path):
|
||||
if not os.path.exists(ru_path_gz):
|
||||
run_cmd(['curl', '-LO', SUBTITLES_RU_URL], cwd=subtitle_dir)
|
||||
run_cmd(['gunzip', ru_path_gz], cwd=subtitle_dir)
|
||||
run_cmd(['gunzip', ru_path_gz])
|
||||
|
||||
|
||||
def has_subtitles_ru(suite_dir):
|
||||
@@ -1184,6 +1184,7 @@ def collect_benchmarks(suite_dir, filter_pat=None,
|
||||
name,
|
||||
' '.join(['--download %s' % n for n in e.missing_names]),
|
||||
))
|
||||
continue
|
||||
except MissingCommands as e:
|
||||
fmt = 'missing commands: %s, skipping benchmark %s ' \
|
||||
'(run with --allow-missing to run incomplete benchmarks)'
|
||||
@@ -1239,7 +1240,7 @@ def main():
|
||||
benchmarks = collect_benchmarks(
|
||||
args.dir, filter_pat=args.bench,
|
||||
allow_missing_commands=args.allow_missing,
|
||||
disabled_cmds=args.disabled.split(','),
|
||||
disabled_cmds=(args.disabled or '').split(','),
|
||||
warmup_iter=args.warmup_iter, bench_iter=args.bench_iter)
|
||||
for b in benchmarks:
|
||||
print(b.name)
|
||||
@@ -1266,7 +1267,7 @@ def main():
|
||||
benchmarks = collect_benchmarks(
|
||||
args.dir, filter_pat=args.bench,
|
||||
allow_missing_commands=args.allow_missing,
|
||||
disabled_cmds=args.disabled.split(','),
|
||||
disabled_cmds=(args.disabled or '').split(','),
|
||||
warmup_iter=args.warmup_iter, bench_iter=args.bench_iter)
|
||||
for i, b in enumerate(benchmarks):
|
||||
result = b.run()
|
||||
|
||||
benchsuite/runs/2018-01-08-archlinux-cheetah/README (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
This directory contains updated benchmarks as of 2018-01-08. They were captured
|
||||
via the benchsuite script at `benchsuite/benchsuite` from the root of this
|
||||
repository. The command that was run:
|
||||
|
||||
$ ./benchsuite \
|
||||
--dir /tmp/benchsuite \
|
||||
--raw runs/2018-01-08-archlinux-cheetah/raw.csv \
|
||||
--warmup-iter 1 \
|
||||
--bench-iter 5
|
||||
|
||||
These results are most directly comparable to the
|
||||
`2016-09-22-archlinux-cheetah` run in the parent directory.
|
||||
|
||||
The versions of each tool are as follows:
|
||||
|
||||
$ grep -V
|
||||
grep (GNU grep) 3.1
|
||||
|
||||
$ ag -V
|
||||
ag version 2.1.0
|
||||
Features:
|
||||
+jit +lzma +zlib
|
||||
|
||||
$ sift -V
|
||||
sift 0.8.0 (linux/amd64)
|
||||
built from commit 2ca94717 (which seems to be 0.9.0)
|
||||
|
||||
$ pt --version
|
||||
pt version 2.1.4
|
||||
|
||||
$ ucg -V
|
||||
UniversalCodeGrep 0.3.3
|
||||
[...]
|
||||
Build info
|
||||
|
||||
Repo version: 0.3.3-251-g9b5a3e3
|
||||
|
||||
Compiler info:
|
||||
Name ($(CXX)): "g++ -std=gnu++1z"
|
||||
Version string: "g++ (GCC) 7.2.1 20171224"
|
||||
|
||||
ISA extensions in use:
|
||||
sse4.2: yes
|
||||
popcnt: yes
|
||||
|
||||
libpcre info:
|
||||
Not linked against libpcre.
|
||||
|
||||
libpcre2-8 info:
|
||||
Version: 10.30 2017-08-14
|
||||
JIT support built in?: yes
|
||||
JIT target architecture: x86 64bit (little endian + unaligned)
|
||||
Newline style: LF
|
||||
|
||||
The version of ripgrep was compiled from source on commit 85d463c0, with the
|
||||
simd-accel and avx-accel features enabled:
|
||||
|
||||
$ export RUSTFLAGS="-C target-cpu=native"
|
||||
$ cargo build --release --features 'simd-accel avx-accel'
|
||||
benchsuite/runs/2018-01-08-archlinux-cheetah/raw.csv (new file, 806 lines)
@@ -0,0 +1,806 @@
|
||||
benchmark,warmup_iter,iter,name,command,duration,lines,env
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.10186767578125,68,
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.10199356079101562,68,
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09750819206237793,68,
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09634733200073242,68,
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.10117292404174805,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.49642109870910645,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.48993706703186035,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.4837028980255127,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.4773833751678467,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.558436393737793,68,
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2605454921722412,68,LC_ALL=C
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.26748204231262207,68,LC_ALL=C
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.26719212532043457,68,LC_ALL=C
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2719383239746094,68,LC_ALL=C
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.26963257789611816,68,LC_ALL=C
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.08797001838684082,68,
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09073781967163086,68,
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.0914468765258789,68,
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09071612358093262,68,
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.0914316177368164,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.1372535228729248,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.13880419731140137,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.13315439224243164,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.1367807388305664,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.13135552406311035,68,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12781810760498047,160,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.11988544464111328,160,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.1205439567565918,160,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12867259979248047,160,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.1215970516204834,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5444357395172119,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5511739253997803,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5382294654846191,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5499558448791504,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.6376545429229736,160,
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9767155647277832,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.920574426651001,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9352290630340576,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.8866012096405029,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9189445972442627,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09351730346679688,160,
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09393739700317383,160,
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09986448287963867,160,
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09596824645996094,160,
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09604883193969727,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.23943114280700684,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2587015628814697,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2543606758117676,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2490406036376953,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.24046540260314941,160,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08253765106201172,16,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08176755905151367,16,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08141684532165527,16,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08108830451965332,16,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08082938194274902,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.6870582103729248,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.807842493057251,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.8129942417144775,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.7582321166992188,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.6869800090789795,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.6534101963043213,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.6020612716674805,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.6712157726287842,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.6267571449279785,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.505136251449585,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.21415948867797852,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.19318318367004395,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.21352124214172363,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.18979454040527344,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.16629600524902344,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.46967077255249023,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.46343088150024414,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.4723978042602539,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.4741063117980957,16,
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.4613051414489746,16,
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.20196986198425293,16,LC_ALL=C
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.18932533264160156,16,LC_ALL=C
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.19396305084228516,16,LC_ALL=C
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.1952073574066162,16,LC_ALL=C
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.20149731636047363,16,LC_ALL=C
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.08270478248596191,16,
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.08414745330810547,16,
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.08627724647521973,16,
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.08978700637817383,16,
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.0836489200592041,16,
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.15774202346801758,16,
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.16005396842956543,16,
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.15743708610534668,16,
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.16156601905822754,16,
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.1557624340057373,16,
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.1028127670288086,374,
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.10258054733276367,374,
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.10902261734008789,374,
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.10802555084228516,374,
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.10153412818908691,374,
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.7902817726135254,374,
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.7985179424285889,374,
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.8208649158477783,374,
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.7937076091766357,374,
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.7936429977416992,374,
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.5215470790863037,374,
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.46518707275390625,374,
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.4467353820800781,374,
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.4595184326171875,374,
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.4531285762786865,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.187762022018433,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.178058385848999,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.096448421478271,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.190524339675903,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.231573343276978,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.4668574333190918,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.46050214767456055,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.46228861808776855,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.44957947731018066,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.4612581729888916,374,
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.1932981014251709,370,LC_ALL=C
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.20561552047729492,370,LC_ALL=C
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.19516706466674805,370,LC_ALL=C
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.20196247100830078,370,LC_ALL=C
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.19236421585083008,370,LC_ALL=C
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.09555959701538086,370,
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.09589338302612305,370,
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.09479856491088867,370,
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.09741568565368652,370,
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.10127615928649902,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.15514039993286133,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.15668940544128418,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.15429425239562988,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.15332818031311035,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.14861536026000977,370,
linux_literal_default,1,5,rg,rg PM_RESUME,0.08931398391723633,16,
linux_literal_default,1,5,rg,rg PM_RESUME,0.08717465400695801,16,
linux_literal_default,1,5,rg,rg PM_RESUME,0.0879361629486084,16,
linux_literal_default,1,5,rg,rg PM_RESUME,0.08688950538635254,16,
linux_literal_default,1,5,rg,rg PM_RESUME,0.09138607978820801,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.5342838764190674,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.47187042236328125,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.4456596374511719,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.4507424831390381,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.44472575187683105,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.15556907653808594,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.1533644199371338,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.15392351150512695,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.1535196304321289,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.15589547157287598,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.2261514663696289,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.2731902599334717,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.2563004493713379,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.2575085163116455,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.1724245548248291,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.13233542442321777,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.1256580352783203,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.12435102462768555,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.1259307861328125,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.12412142753601074,16,
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.1742086410522461,16,LC_ALL=en_US.UTF-8
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.16890597343444824,16,LC_ALL=en_US.UTF-8
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.16680669784545898,16,LC_ALL=en_US.UTF-8
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.16899871826171875,16,LC_ALL=en_US.UTF-8
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.19794917106628418,16,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.33940672874450684,490,
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.3274960517883301,490,
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.32681775093078613,490,
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.32865071296691895,490,
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.3240926265716553,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.17426586151123047,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.17265701293945312,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.1703634262084961,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.17192435264587402,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.1704559326171875,490,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.8443403244018555,766,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.6956703662872314,766,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.6938261985778809,766,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.695967435836792,766,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.6945271492004395,766,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.645716428756714,490,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.441533088684082,490,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.472522735595703,490,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.42497444152832,490,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.407486200332642,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},9.091489553451538,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},9.049214124679565,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.879419803619385,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},9.07261848449707,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.918747901916504,490,
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.334321975708008,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.993232727050781,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.622304916381836,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.35973048210144,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.39980435371399,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},2.0318400859832764,490,LC_ALL=C
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},1.8587837219238281,490,LC_ALL=C
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},1.873384714126587,490,LC_ALL=C
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},1.8111364841461182,490,LC_ALL=C
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},1.8385357856750488,490,LC_ALL=C
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.28792643547058105,458,
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.28545212745666504,458,
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.28576135635375977,458,
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.29883813858032227,458,
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.28493285179138184,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.15974783897399902,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.15943312644958496,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.160233736038208,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.16201996803283691,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.16033530235290527,458,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.4639148712158203,416,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.46042823791503906,416,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.45925426483154297,416,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.477064847946167,416,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.507554292678833,416,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.08520364761352539,1652,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.08203816413879395,1652,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.08355021476745605,1652,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.0865166187286377,1652,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.08125448226928711,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.4846627712249756,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.48070311546325684,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.4813041687011719,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.4755582809448242,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.4926290512084961,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.124520540237427,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.151537656784058,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.157994270324707,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.102291822433472,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.103861093521118,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,4.182392835617065,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,4.190829277038574,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,3.9770240783691406,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,3.9978606700897217,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,4.146454572677612,1652,
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5080702304840088,1652,LC_ALL=C
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5281260013580322,1652,LC_ALL=C
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5350546836853027,1652,LC_ALL=C
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5474245548248291,1652,LC_ALL=C
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5256762504577637,1652,LC_ALL=C
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.07924222946166992,1630,
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.0767812728881836,1630,
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.07874488830566406,1630,
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.0804905891418457,1630,
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.07479119300842285,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13643193244934082,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13543128967285156,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13312768936157227,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13562273979187012,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13236212730407715,1630,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.17355775833129883,23,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.1676032543182373,23,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.1727275848388672,23,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.17095375061035156,23,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.17271947860717773,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.14364218711853,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.137334108352661,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.083475351333618,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.095231056213379,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.151906490325928,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.8376963138580322,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.8271427154541016,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.8310961723327637,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.826141595840454,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.805818796157837,23,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.16843819618225098,103,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.1704998016357422,103,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.17055058479309082,103,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.17064881324768066,103,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.1699228286743164,103,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.164355993270874,23,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.099931478500366,23,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.155095338821411,23,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.109308004379272,23,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.072362422943115,23,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.003945589065551758,,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.004189729690551758,,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.0034589767456054688,,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.003614187240600586,,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.003975629806518555,,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.09798526763916016,186,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.09575009346008301,186,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.10181760787963867,186,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.09650158882141113,186,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.09717488288879395,186,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09417867660522461,174,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09903812408447266,174,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09407877922058105,174,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09681963920593262,174,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09762454032897949,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.5779609680175781,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.635645866394043,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.6109263896942139,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.6260912418365479,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.6823546886444092,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.178487062454224,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.190000057220459,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.16363000869751,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.160430431365967,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.2189621925354,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.17629337310791,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.051238059997559,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.323853015899658,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.085661172866821,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.036486625671387,174,
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,4.620476961135864,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,4.536192417144775,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,4.510494232177734,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,6.001620769500732,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,4.602652311325073,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.3785994052886963,174,LC_ALL=C
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.4163663387298584,174,LC_ALL=C
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.402677297592163,174,LC_ALL=C
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.3327512741088867,174,LC_ALL=C
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.3501760959625244,174,LC_ALL=C
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.07958698272705078,180,
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.0798649787902832,180,
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.08086204528808594,180,
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.0814356803894043,180,
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.08273720741271973,180,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.08280825614929199,168,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.08074021339416504,168,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.0821676254272461,168,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.07926368713378906,168,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.08405280113220215,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.1545090675354004,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.1517190933227539,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.15704965591430664,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.15523767471313477,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.1582942008972168,168,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.09102368354797363,6,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.08986210823059082,6,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.08989477157592773,6,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.0895695686340332,6,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.09547114372253418,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.4948008060455322,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.45710110664367676,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.44803452491760254,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.44779396057128906,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.4563112258911133,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.233235597610474,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.277648687362671,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.218127727508545,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.171622037887573,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.214240312576294,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.1536731719970703,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.2415099143981934,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.2526626586914062,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.2590816020965576,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.222473621368408,6,
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.16982412338256836,6,LC_ALL=C
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.16739583015441895,6,LC_ALL=C
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.16866540908813477,6,LC_ALL=C
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.18207120895385742,6,LC_ALL=C
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.17716264724731445,6,LC_ALL=C
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.07490420341491699,6,
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.07714152336120605,6,
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.07552146911621094,6,
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.07651710510253906,6,
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.0757131576538086,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.1530015468597412,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.15152239799499512,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.1571195125579834,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.15993595123291016,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.15633797645568848,6,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.33371877670288086,848,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3207988739013672,848,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3301675319671631,848,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.29731154441833496,848,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2711911201477051,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.186570405960083,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.1659939289093018,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.187847137451172,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.3522064685821533,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.316105842590332,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1400718688964844,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1492774486541748,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1337254047393799,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1037378311157227,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1312851905822754,848,
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.8294000625610352,848,LC_ALL=C
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.808884620666504,848,LC_ALL=C
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.8134734630584717,848,LC_ALL=C
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.8405649662017822,848,LC_ALL=C
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.8500289916992188,848,LC_ALL=C
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.21175312995910645,848,
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2118232250213623,848,
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.21287035942077637,848,
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.21167230606079102,848,
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.28102636337280273,848,
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5029187202453613,848,LC_ALL=C
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.49977445602417,848,LC_ALL=C
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.508340835571289,848,LC_ALL=C
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5002548694610596,848,LC_ALL=C
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.629526138305664,848,LC_ALL=C
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.730497360229492,862,
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.781018018722534,862,
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.7858059406280518,862,
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.7127914428710938,862,
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.717308759689331,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.428208351135254,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.389420509338379,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.403301954269409,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.4691550731658936,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.4245004653930664,862,
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.978189706802368,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.974303722381592,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.982886552810669,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.90018630027771,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.0078439712524414,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9129142761230469,862,
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9066660404205322,862,
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.946380615234375,862,
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9672930240631104,862,
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.028451919555664,862,
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.9427030086517334,862,LC_ALL=en_US.UTF-8
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.938739061355591,862,LC_ALL=en_US.UTF-8
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.921248435974121,862,LC_ALL=en_US.UTF-8
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.9194068908691406,862,LC_ALL=en_US.UTF-8
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.917184829711914,862,LC_ALL=en_US.UTF-8
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.12293672561645508,629,
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1259000301361084,629,
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.12285709381103516,629,
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.12280964851379395,629,
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1547396183013916,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.22011375427246094,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.23095202445983887,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2577846050262451,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2563819885253906,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.24869346618652344,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.415337324142456,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4208543300628662,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.416351079940796,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4270708560943604,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4243996143341064,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2245020866394043,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2382345199584961,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.23533034324645996,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2577829360961914,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2599349021911621,629,
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4733700752258301,629,LC_ALL=C
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4598572254180908,629,LC_ALL=C
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5303301811218262,629,LC_ALL=C
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4775106906890869,629,LC_ALL=C
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4881136417388916,629,LC_ALL=C
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.20051789283752441,629,
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17326998710632324,629,
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.20733428001403809,629,
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.189713716506958,629,
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17817258834838867,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5327835083007812,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5411181449890137,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.600783109664917,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5838911533355713,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.6051928997039795,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4090385437011719,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3816399574279785,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.38033008575439453,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3731727600097656,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.38796329498291016,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4102630615234375,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4137451648712158,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4649333953857422,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.430387258529663,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.541991949081421,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.6231405735015869,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5986526012420654,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5821917057037354,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.6045489311218262,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5986905097961426,629,
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8278565406799316,629,LC_ALL=C
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.777052640914917,629,LC_ALL=C
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7619414329528809,629,LC_ALL=C
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8248744010925293,629,LC_ALL=C
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.824932336807251,629,LC_ALL=C
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2718961238861084,642,
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.27082157135009766,642,
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.27086758613586426,642,
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.274705171585083,642,
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3337059020996094,642,
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9112112522125244,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.907888650894165,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.912668228149414,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9082865715026855,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9177796840667725,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.6020669937133789,642,LC_ALL=C
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.568228006362915,642,LC_ALL=C
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5648214817047119,642,LC_ALL=C
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5568234920501709,642,LC_ALL=C
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5588953495025635,642,LC_ALL=C
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3486766815185547,642,
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.34010815620422363,642,
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.33849263191223145,642,
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3917088508605957,642,
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.39266490936279297,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5564041137695312,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5533506870269775,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.6205368041992188,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5530028343200684,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.6189889907836914,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3834850788116455,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.41916346549987793,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3895289897918701,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4278140068054199,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4013493061065674,642,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17953085899353027,629,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17679834365844727,629,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17448186874389648,629,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.21117281913757324,629,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1848156452178955,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5236153602600098,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.52512526512146,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5218794345855713,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5384306907653809,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5150353908538818,629,
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3757903575897217,629,
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3744041919708252,629,
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.37261366844177246,629,
|
||||
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.40795230865478516,629,
|
||||
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3868849277496338,629,
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8265349864959717,629,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8123743534088135,629,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7669925689697266,629,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.766636848449707,629,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7665839195251465,629,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1879115104675293,629,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18082356452941895,629,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18497347831726074,629,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1769394874572754,629,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1917715072631836,629,
|
||||
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8192996978759766,629,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8193323612213135,629,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7837738990783691,629,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7639024257659912,629,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7634689807891846,629,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.7922985553741455,13,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.7885758876800537,13,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.802325963973999,13,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.792595386505127,13,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.7909605503082275,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5903098583221436,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5982813835144043,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5926671028137207,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5976767539978027,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.593153953552246,13,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.614634275436401,48,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.574857473373413,48,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.54079270362854,48,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.600660800933838,48,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.531627178192139,48,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.361133337020874,13,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.456786870956421,13,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.403071403503418,13,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.398236274719238,13,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.348573923110962,13,
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.5057969093322754,13,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.4157862663269043,13,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.471182346343994,13,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.4590909481048584,13,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.3759689331054688,13,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18518710136413574,317,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18791556358337402,317,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18598675727844238,317,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18552684783935547,317,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.19262075424194336,317,
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1321008205413818,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0709969997406006,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1117346286773682,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0880234241485596,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0745558738708496,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1827528476715088,317,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18874144554138184,317,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17983436584472656,317,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18831133842468262,317,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17810606956481934,317,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.5957207679748535,323,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.627211570739746,323,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.554431200027466,323,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.492656469345093,323,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.443558216094971,323,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.522758722305298,317,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.502918004989624,317,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.6503307819366455,317,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.58940052986145,317,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.569624423980713,317,
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0672054290771484,317,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0729331970214844,317,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.052501916885376,317,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0711696147918701,317,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0863316059112549,317,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0312588214874268,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.063939094543457,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0000121593475342,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9842438697814941,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.95733642578125,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.7781903743743896,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.861164093017578,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.8268885612487793,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.8621268272399902,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.8216166496276855,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0069098472595215,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.025178909301758,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0631070137023926,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0902633666992188,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0272655487060547,691,
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.510146617889404,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.541701793670654,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.506088733673096,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.51838755607605,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.486810684204102,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9679937362670898,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9942011833190918,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9233448505401611,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9294781684875488,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.8729774951934814,691,
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.100147485733032,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.075790166854858,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.069685220718384,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.0526063442230225,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.129194498062134,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.7894201278686523,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.7878782749176025,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.796328544616699,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.8249149322509766,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.7949724197387695,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.075739622116089,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.013590097427368,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.012375593185425,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.023118495941162,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0641982555389404,691,
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.467320442199707,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.486851692199707,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.479818344116211,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.516186475753784,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.471773862838745,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.026185274124146,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.168465614318848,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.039950370788574,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.089850425720215,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.112446546554565,735,
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.822641849517822,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.808355331420898,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.80171275138855,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.794351577758789,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.844403266906738,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.20681476593017578,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.190568208694458,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.18462657928466797,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.1873643398284912,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.20382428169250488,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.3085510730743408,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.318758487701416,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.3177149295806885,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.31236958503723145,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.31880998611450195,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.152938365936279,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.124867677688599,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.132290363311768,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.158328056335449,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.1022467613220215,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.807113409042358,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.8178558349609375,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.925220012664795,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.861236333847046,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.763278484344482,583,
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.704503059387207,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6887199878692627,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7092702388763428,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6964359283447266,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6928379535675049,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2646975517272949,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.26806163787841797,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2700214385986328,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2669072151184082,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2656106948852539,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.9972407817840576,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.906053066253662,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.864766836166382,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7820546627044678,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7599871158599854,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.411653995513916,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.394604206085205,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.362853765487671,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.4795477390289307,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.4428844451904297,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.122563123703003,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.17008900642395,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.1965367794036865,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.152370929718018,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.106513738632202,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.408761978149414,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.423579454421997,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.2807464599609375,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.3771467208862305,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.378506422042847,583,
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.121800422668457,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.1189923286437988,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0678138732910156,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0668041706085205,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0713574886322021,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9427816867828369,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0397350788116455,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9732518196105957,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9387776851654053,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9536802768707275,604,
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.338641405105591,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.280565023422241,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.241750240325928,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.316105604171753,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.307560205459595,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7379302978515625,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7226619720458984,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.683293342590332,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.714146614074707,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7654330730438232,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0237820148468018,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0194151401519775,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0364336967468262,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.035005807876587,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0438766479492188,604,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.619025468826294,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.647244930267334,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6785612106323242,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6503715515136719,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6314499378204346,,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.8302316665649414,583,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7719593048095703,583,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7697594165802002,583,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7312629222869873,583,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.767866849899292,583,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.19411826133728027,,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.18651676177978516,,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.19614577293395996,,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.18459081649780273,,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.1797487735748291,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6507105827331543,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6480035781860352,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7138750553131104,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6521759033203125,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6728894710540771,,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.3646819591522217,583,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.3836848735809326,583,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.419490337371826,583,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.363335609436035,583,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.488351345062256,583,
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.171506643295288,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.1602776050567627,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.084787368774414,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0714166164398193,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.083632469177246,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2769143581390381,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2694058418273926,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.26763367652893066,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2671318054199219,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2922348976135254,579,
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.083528757095337,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0857081413269043,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.07025146484375,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.071930170059204,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0709245204925537,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.1552906036376953,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.164951801300049,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.175389289855957,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.1861774921417236,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.153625011444092,41,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7353317737579346,,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7592883110046387,,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7242491245269775,,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.747089385986328,,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.732586145401001,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0796375274658203,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9670393466949463,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9413447380065918,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.916764497756958,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9110031127929688,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0622072219848633,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0975682735443115,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0741493701934814,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0423810482025146,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.000764846801758,,
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.6251120567321777,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.644089698791504,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.6416165828704834,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.6321892738342285,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.6264762878417969,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.29879307746887207,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.3226010799407959,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.32187771797180176,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2825047969818115,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.283217191696167,278,
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.3977878093719482,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.4288139343261719,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.4054889678955078,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.4003441333770752,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.5269148349761963,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.8912529945373535,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9221522808074951,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9416618347167969,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.893650770187378,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.8895554542541504,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0110745429992676,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9790067672729492,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0426392555236816,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.121723175048828,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.1247596740722656,,
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.3579976558685303,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.382859468460083,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.393401861190796,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.474374532699585,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.3835601806640625,,LC_ALL=C
235
benchsuite/runs/2018-01-08-archlinux-cheetah/summary
Normal file
@@ -0,0 +1,235 @@
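The summary that follows condenses the raw per-run records above: for each benchmark and tool label it reports the mean and sample standard deviation of the five recorded wall-clock times, together with the matched line count. A minimal sketch of that aggregation (the CSV field order is inferred from the raw rows above, `raw.csv` is a hypothetical input path, and this is not the benchsuite's own code):

    import csv
    import statistics
    from collections import defaultdict

    # Field order inferred from the raw rows above:
    # benchmark, ?, ?, tool label, command, duration (s), matched lines, env.
    runs = defaultdict(list)
    with open("raw.csv", newline="") as f:   # hypothetical path to the raw data
        for row in csv.reader(f):
            benchmark, label, seconds = row[0], row[3], float(row[5])
            runs[(benchmark, label)].append(seconds)

    # Report mean +/- sample standard deviation, as in the summary below.
    for (benchmark, label), times in sorted(runs.items()):
        mean = statistics.mean(times)
        dev = statistics.stdev(times) if len(times) > 1 else 0.0
        print(f"{benchmark:35} {label:25} {mean:.3f} +/- {dev:.3f}")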
linux_alternates (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
-------------------------------------------------------------------------
rg (ignore) 0.100 +/- 0.003 (lines: 68)
ag (ignore) 0.501 +/- 0.033 (lines: 68)
git grep (ignore) 0.267 +/- 0.004 (lines: 68)
rg (whitelist)* 0.090 +/- 0.001 (lines: 68)*
ucg (whitelist) 0.135 +/- 0.003 (lines: 68)

linux_alternates_casei (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
-------------------------------------------------------------------------------
rg (ignore) 0.124 +/- 0.004 (lines: 160)
ag (ignore) 0.564 +/- 0.041 (lines: 160)
git grep (ignore) 0.928 +/- 0.033 (lines: 160)
rg (whitelist)* 0.096 +/- 0.003 (lines: 160)*
ucg (whitelist) 0.248 +/- 0.008 (lines: 160)

linux_literal (pattern: PM_RESUME)
----------------------------------
rg (ignore)* 0.082 +/- 0.001 (lines: 16)*
rg (ignore) (mmap) 0.751 +/- 0.062 (lines: 16)
ag (ignore) (mmap) 0.612 +/- 0.065 (lines: 16)
pt (ignore) 0.195 +/- 0.020 (lines: 16)
sift (ignore) 0.468 +/- 0.006 (lines: 16)
git grep (ignore) 0.196 +/- 0.005 (lines: 16)
rg (whitelist) 0.085 +/- 0.003 (lines: 16)
ucg (whitelist) 0.159 +/- 0.002 (lines: 16)

linux_literal_casei (pattern: PM_RESUME)
----------------------------------------
rg (ignore) 0.105 +/- 0.003 (lines: 374)
rg (ignore) (mmap) 0.799 +/- 0.012 (lines: 374)
ag (ignore) (mmap) 0.469 +/- 0.030 (lines: 374)
pt (ignore) 14.177 +/- 0.049 (lines: 374)
sift (ignore) 0.460 +/- 0.006 (lines: 374)
git grep (ignore) 0.198 +/- 0.006 (lines: 370)
rg (whitelist)* 0.097 +/- 0.003 (lines: 370)*
ucg (whitelist) 0.154 +/- 0.003 (lines: 370)

linux_literal_default (pattern: PM_RESUME)
------------------------------------------
rg* 0.089 +/- 0.002 (lines: 16)*
ag 0.469 +/- 0.038 (lines: 16)
ucg 0.154 +/- 0.001 (lines: 16)
pt 0.237 +/- 0.040 (lines: 16)
sift 0.126 +/- 0.003 (lines: 16)
git grep 0.175 +/- 0.013 (lines: 16)

linux_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
-----------------------------------------------------------------
rg (ignore) 0.329 +/- 0.006 (lines: 490)
rg (ignore) (ASCII) 0.172 +/- 0.002 (lines: 490)
ag (ignore) (ASCII) 0.725 +/- 0.067 (lines: 766)
pt (ignore) (ASCII) 12.478 +/- 0.097 (lines: 490)
sift (ignore) (ASCII) 9.002 +/- 0.096 (lines: 490)
git grep (ignore) 8.542 +/- 0.277 (lines: 490)
git grep (ignore) (ASCII) 1.883 +/- 0.087 (lines: 490)
rg (whitelist) 0.289 +/- 0.006 (lines: 458)
rg (whitelist) (ASCII)* 0.160 +/- 0.001 (lines: 458)*
ucg (whitelist) (ASCII) 0.474 +/- 0.020 (lines: 416)

linux_re_literal_suffix (pattern: [A-Z]+_RESUME)
------------------------------------------------
rg (ignore) 0.084 +/- 0.002 (lines: 1652)
ag (ignore) 0.483 +/- 0.006 (lines: 1652)
pt (ignore) 14.128 +/- 0.026 (lines: 1652)
sift (ignore) 4.099 +/- 0.103 (lines: 1652)
git grep (ignore) 0.529 +/- 0.014 (lines: 1652)
rg (whitelist)* 0.078 +/- 0.002 (lines: 1630)*
ucg (whitelist) 0.135 +/- 0.002 (lines: 1630)

linux_unicode_greek (pattern: \p{Greek})
----------------------------------------
rg* 0.172 +/- 0.002 (lines: 23)*
pt 14.122 +/- 0.031 (lines: 23)
sift 2.826 +/- 0.012 (lines: 23)

linux_unicode_greek_casei (pattern: \p{Greek})
----------------------------------------------
rg 0.170 +/- 0.001 (lines: 103)
pt 14.120 +/- 0.039 (lines: 23)
sift* 0.004 +/- 0.000 (lines: 0)*

linux_unicode_word (pattern: \wAh)
----------------------------------
rg (ignore) 0.098 +/- 0.002 (lines: 186)
rg (ignore) (ASCII) 0.096 +/- 0.002 (lines: 174)
ag (ignore) (ASCII) 0.627 +/- 0.038 (lines: 174)
pt (ignore) (ASCII) 14.182 +/- 0.024 (lines: 174)
sift (ignore) (ASCII) 4.135 +/- 0.119 (lines: 174)
git grep (ignore) 4.854 +/- 0.643 (lines: 186)
git grep (ignore) (ASCII) 1.376 +/- 0.035 (lines: 174)
rg (whitelist) 0.081 +/- 0.001 (lines: 180)*
rg (whitelist) (ASCII)* 0.082 +/- 0.002 (lines: 168)
ucg (ASCII) 0.155 +/- 0.003 (lines: 168)

linux_word (pattern: PM_RESUME)
-------------------------------
rg (ignore) 0.091 +/- 0.002 (lines: 6)
ag (ignore) 0.461 +/- 0.020 (lines: 6)
pt (ignore) 14.223 +/- 0.038 (lines: 6)
sift (ignore) 3.226 +/- 0.043 (lines: 6)
git grep (ignore) 0.173 +/- 0.006 (lines: 6)
rg (whitelist)* 0.076 +/- 0.001 (lines: 6)*
ucg (whitelist) 0.156 +/- 0.003 (lines: 6)

subtitles_en_alternate (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
---------------------------------------------------------------------------------------------------------------
rg (lines) 0.311 +/- 0.026 (lines: 848)
ag (lines) 2.242 +/- 0.086 (lines: 848)
ucg (lines) 1.132 +/- 0.017 (lines: 848)
grep (lines) 1.828 +/- 0.017 (lines: 848)
rg* 0.226 +/- 0.031 (lines: 848)*
grep 1.528 +/- 0.057 (lines: 848)

subtitles_en_alternate_casei (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
---------------------------------------------------------------------------------------------------------------------
ag (ASCII) 3.745 +/- 0.035 (lines: 862)
ucg (ASCII) 2.423 +/- 0.030 (lines: 862)
grep (ASCII) 2.969 +/- 0.040 (lines: 862)
rg* 1.952 +/- 0.049 (lines: 862)*
grep 2.928 +/- 0.012 (lines: 862)

subtitles_en_literal (pattern: Sherlock Holmes)
-----------------------------------------------
rg* 0.130 +/- 0.014 (lines: 629)*
rg (no mmap) 0.243 +/- 0.017 (lines: 629)
pt 1.421 +/- 0.005 (lines: 629)
sift 0.243 +/- 0.015 (lines: 629)
grep 0.486 +/- 0.027 (lines: 629)
rg (lines) 0.190 +/- 0.014 (lines: 629)
ag (lines) 1.573 +/- 0.034 (lines: 629)
ucg (lines) 0.386 +/- 0.014 (lines: 629)
pt (lines) 1.452 +/- 0.055 (lines: 629)
sift (lines) 0.601 +/- 0.015 (lines: 629)
grep (lines) 0.803 +/- 0.031 (lines: 629)

subtitles_en_literal_casei (pattern: Sherlock Holmes)
-----------------------------------------------------
rg* 0.284 +/- 0.028 (lines: 642)*
grep 1.912 +/- 0.004 (lines: 642)
grep (ASCII) 0.570 +/- 0.018 (lines: 642)
rg (lines) 0.362 +/- 0.028 (lines: 642)
ag (lines) (ASCII) 1.580 +/- 0.036 (lines: 642)
ucg (lines) (ASCII) 0.404 +/- 0.019 (lines: 642)

subtitles_en_literal_word (pattern: Sherlock Holmes)
----------------------------------------------------
rg (ASCII)* 0.185 +/- 0.015 (lines: 629)
ag (ASCII) 1.525 +/- 0.009 (lines: 629)
ucg (ASCII) 0.384 +/- 0.015 (lines: 629)
grep (ASCII) 0.788 +/- 0.029 (lines: 629)
rg 0.184 +/- 0.006 (lines: 629)*
grep 0.790 +/- 0.028 (lines: 629)

subtitles_en_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
----------------------------------------------------------------------------------------
rg 1.793 +/- 0.005 (lines: 13)
rg (ASCII)* 1.594 +/- 0.003 (lines: 13)*
ag (ASCII) 6.573 +/- 0.036 (lines: 48)
ucg (ASCII) 5.394 +/- 0.042 (lines: 13)
grep (ASCII) 3.446 +/- 0.050 (lines: 13)
subtitles_en_surrounding_words (pattern: \w+\s+Holmes\s+\w+)
|
||||
------------------------------------------------------------
|
||||
rg 0.187 +/- 0.003 (lines: 317)
|
||||
grep 1.095 +/- 0.026 (lines: 317)
|
||||
rg (ASCII)* 0.184 +/- 0.005 (lines: 317)*
|
||||
ag (ASCII) 4.543 +/- 0.075 (lines: 323)
|
||||
ucg (ASCII) 3.567 +/- 0.058 (lines: 317)
|
||||
grep (ASCII) 1.070 +/- 0.012 (lines: 317)
|
||||
|
||||
subtitles_ru_alternate (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
|
||||
-----------------------------------------------------------------------------------------------------------
|
||||
rg (lines) 1.007 +/- 0.041 (lines: 691)
|
||||
ag (lines) 3.830 +/- 0.035 (lines: 691)
|
||||
ucg (lines) 2.043 +/- 0.034 (lines: 691)
|
||||
grep (lines) 7.513 +/- 0.020 (lines: 691)
|
||||
rg* 0.938 +/- 0.046 (lines: 691)*
|
||||
grep 7.085 +/- 0.030 (lines: 691)
|
||||
|
||||
subtitles_ru_alternate_casei (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
|
||||
-----------------------------------------------------------------------------------------------------------------
|
||||
ag (ASCII) 3.799 +/- 0.015 (lines: 691)
|
||||
ucg (ASCII)* 2.038 +/- 0.030 (lines: 691)*
|
||||
grep (ASCII) 7.484 +/- 0.019 (lines: 691)
|
||||
rg 11.087 +/- 0.057 (lines: 735)
|
||||
grep 6.814 +/- 0.020 (lines: 735)
|
||||
|
||||
subtitles_ru_literal (pattern: Шерлок Холмс)
|
||||
--------------------------------------------
|
||||
rg* 0.195 +/- 0.010 (lines: 583)*
|
||||
rg (no mmap) 0.315 +/- 0.005 (lines: 583)
|
||||
pt 5.134 +/- 0.023 (lines: 583)
|
||||
sift 5.835 +/- 0.061 (lines: 583)
|
||||
grep 0.698 +/- 0.008 (lines: 583)
|
||||
rg (lines) 0.267 +/- 0.002 (lines: 583)
|
||||
ag (lines) 2.862 +/- 0.096 (lines: 583)
|
||||
ucg (lines) 2.418 +/- 0.045 (lines: 583)
|
||||
pt (lines) 5.150 +/- 0.036 (lines: 583)
|
||||
sift (lines) 6.374 +/- 0.056 (lines: 583)
|
||||
grep (lines) 1.089 +/- 0.028 (lines: 583)
|
||||
|
||||
subtitles_ru_literal_casei (pattern: Шерлок Холмс)
|
||||
--------------------------------------------------
|
||||
rg 0.970 +/- 0.041 (lines: 604)
|
||||
grep 6.297 +/- 0.037 (lines: 604)
|
||||
grep (ASCII) 0.725 +/- 0.030 (lines: 583)
|
||||
rg (lines) 1.032 +/- 0.010 (lines: 604)
|
||||
ag (lines) (ASCII)* 0.645 +/- 0.022 (lines: 0)*
|
||||
ucg (lines) (ASCII) 0.774 +/- 0.036 (lines: 583)
|
||||
|
||||
subtitles_ru_literal_word (pattern: Шерлок Холмс)
|
||||
-------------------------------------------------
|
||||
rg (ASCII)* 0.188 +/- 0.007 (lines: 0)*
|
||||
ag (ASCII) 0.668 +/- 0.028 (lines: 0)
|
||||
ucg (ASCII) 2.404 +/- 0.052 (lines: 583)
|
||||
grep (ASCII) 1.114 +/- 0.048 (lines: 583)
|
||||
rg 0.275 +/- 0.011 (lines: 579)
|
||||
grep 1.076 +/- 0.008 (lines: 579)
|
||||
|
||||
subtitles_ru_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
|
||||
----------------------------------------------------------------------------------------
|
||||
rg 3.167 +/- 0.014 (lines: 41)
|
||||
rg (ASCII) 2.740 +/- 0.014 (lines: 0)
|
||||
ag (ASCII) 1.963 +/- 0.069 (lines: 0)
|
||||
ucg (ASCII) 2.055 +/- 0.037 (lines: 0)
|
||||
grep (ASCII)* 1.634 +/- 0.009 (lines: 0)*
|
||||
|
||||
subtitles_ru_surrounding_words (pattern: \w+\s+Холмс\s+\w+)
|
||||
-----------------------------------------------------------
|
||||
rg* 0.302 +/- 0.020 (lines: 278)*
|
||||
grep 1.432 +/- 0.055 (lines: 278)
|
||||
ag (ASCII) 1.908 +/- 0.023 (lines: 0)
|
||||
ucg (ASCII) 2.056 +/- 0.066 (lines: 0)
|
||||
grep (ASCII) 1.398 +/- 0.044 (lines: 0)
|
||||
165
build.rs
@@ -4,24 +4,181 @@ extern crate clap;
extern crate lazy_static;

use std::env;
use std::fs;
use std::fs::{self, File};
use std::io::{self, Read, Write};
use std::path::Path;
use std::process;

use clap::Shell;

use app::{RGArg, RGArgKind};

#[allow(dead_code)]
#[path = "src/app.rs"]
mod app;

fn main() {
// OUT_DIR is set by Cargo and it's where any additional build artifacts
// are written.
let outdir = match env::var_os("OUT_DIR") {
None => return,
Some(outdir) => outdir,
None => {
eprintln!(
"OUT_DIR environment variable not defined. \
Please file a bug: \
https://github.com/BurntSushi/ripgrep/issues/new");
process::exit(1);
}
};
fs::create_dir_all(&outdir).unwrap();

let mut app = app::app_short();
let stamp_path = Path::new(&outdir).join("ripgrep-stamp");
if let Err(err) = File::create(&stamp_path) {
panic!("failed to write {}: {}", stamp_path.display(), err);
}
if let Err(err) = generate_man_page(&outdir) {
eprintln!("failed to generate man page: {}", err);
}

// Use clap to build completion files.
let mut app = app::app();
app.gen_completions("rg", Shell::Bash, &outdir);
app.gen_completions("rg", Shell::Fish, &outdir);
app.gen_completions("rg", Shell::Zsh, &outdir);
app.gen_completions("rg", Shell::PowerShell, &outdir);
// Note that we do not use clap's support for zsh. Instead, zsh completions
// are manually maintained in `complete/_rg`.

// Make the current git hash available to the build.
if let Some(rev) = git_revision_hash() {
println!("cargo:rustc-env=RIPGREP_BUILD_GIT_HASH={}", rev);
}
}

fn git_revision_hash() -> Option<String> {
let result = process::Command::new("git")
.args(&["rev-parse", "--short=10", "HEAD"])
.output();
result.ok().and_then(|output| {
let v = String::from_utf8_lossy(&output.stdout).trim().to_string();
if v.is_empty() {
None
} else {
Some(v)
}
})
}

fn generate_man_page<P: AsRef<Path>>(outdir: P) -> io::Result<()> {
// If asciidoc isn't installed, then don't do anything.
if let Err(err) = process::Command::new("a2x").output() {
eprintln!("Could not run 'a2x' binary, skipping man page generation.");
eprintln!("Error from running 'a2x': {}", err);
return Ok(());
}
// 1. Read asciidoc template.
// 2. Interpolate template with auto-generated docs.
// 3. Save interpolation to disk.
// 4. Use a2x (part of asciidoc) to convert to man page.
let outdir = outdir.as_ref();
let cwd = env::current_dir()?;
let tpl_path = cwd.join("doc").join("rg.1.txt.tpl");
let txt_path = outdir.join("rg.1.txt");

let mut tpl = String::new();
File::open(&tpl_path)?.read_to_string(&mut tpl)?;
tpl = tpl.replace("{OPTIONS}", &formatted_options()?);

let githash = git_revision_hash();
let githash = githash.as_ref().map(|x| &**x);
tpl = tpl.replace("{VERSION}", &app::long_version(githash));

File::create(&txt_path)?.write_all(tpl.as_bytes())?;
let result = process::Command::new("a2x")
.arg("--no-xmllint")
.arg("--doctype").arg("manpage")
.arg("--format").arg("manpage")
.arg(&txt_path)
.spawn()?
.wait()?;
if !result.success() {
let msg = format!("'a2x' failed with exit code {:?}", result.code());
return Err(ioerr(msg));
}
Ok(())
}

fn formatted_options() -> io::Result<String> {
let mut args = app::all_args_and_flags();
args.sort_by(|x1, x2| x1.name.cmp(&x2.name));

let mut formatted = vec![];
for arg in args {
if arg.hidden {
continue;
}
// ripgrep only has two positional arguments, and probably will only
// ever have two positional arguments, so we just hardcode them into
// the template.
if let app::RGArgKind::Positional{..} = arg.kind {
continue;
}
formatted.push(formatted_arg(&arg)?);
}
Ok(formatted.join("\n\n"))
}

fn formatted_arg(arg: &RGArg) -> io::Result<String> {
match arg.kind {
RGArgKind::Positional{..} => panic!("unexpected positional argument"),
RGArgKind::Switch { long, short, multiple } => {
let mut out = vec![];

let mut header = format!("--{}", long);
if let Some(short) = short {
header = format!("-{}, {}", short, header);
}
if multiple {
header = format!("*{}* ...::", header);
} else {
header = format!("*{}*::", header);
}
writeln!(out, "{}", header)?;
writeln!(out, "{}", formatted_doc_txt(arg)?)?;

Ok(String::from_utf8(out).unwrap())
}
RGArgKind::Flag { long, short, value_name, multiple, .. } => {
let mut out = vec![];

let mut header = format!("--{}", long);
if let Some(short) = short {
header = format!("-{}, {}", short, header);
}
if multiple {
header = format!("*{}* _{}_ ...::", header, value_name);
} else {
header = format!("*{}* _{}_::", header, value_name);
}
writeln!(out, "{}", header)?;
writeln!(out, "{}", formatted_doc_txt(arg)?)?;

Ok(String::from_utf8(out).unwrap())
}
}
}

fn formatted_doc_txt(arg: &RGArg) -> io::Result<String> {
let paragraphs: Vec<&str> = arg.doc_long.split("\n\n").collect();
if paragraphs.is_empty() {
return Err(ioerr(format!("missing docs for --{}", arg.name)));
}
let first = format!(" {}", paragraphs[0].replace("\n", "\n "));
if paragraphs.len() == 1 {
return Ok(first);
}
Ok(format!("{}\n+\n{}", first, paragraphs[1..].join("\n+\n")))
}

fn ioerr(msg: String) -> io::Error {
io::Error::new(io::ErrorKind::Other, msg)
}

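The `cargo:rustc-env=RIPGREP_BUILD_GIT_HASH=...` line above exports the short git revision to rustc for the main compilation. As a rough illustration only (a minimal Rust sketch, not code taken from the ripgrep tree; names are hypothetical), a crate built with a build script like this could read the value at compile time via option_env!, which yields None when the variable was not set, for example when building from a source archive without a git checkout:

    // Hypothetical consumer of the build script above; names are illustrative.
    fn long_version() -> String {
        // CARGO_PKG_VERSION is always provided by Cargo at compile time.
        let semver = env!("CARGO_PKG_VERSION");
        // RIPGREP_BUILD_GIT_HASH is only present when the build script found git.
        match option_env!("RIPGREP_BUILD_GIT_HASH") {
            None => semver.to_string(),
            Some(hash) => format!("{} (rev {})", semver, hash),
        }
    }

    fn main() {
        println!("rg {}", long_version());
    }
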
51
ci/before_deploy.sh
Normal file → Executable file
@@ -1,31 +1,50 @@
# `before_deploy` phase: here we package the build artifacts
#!/bin/bash

# package the build artifacts

set -ex

. $(dirname $0)/utils.sh
. "$(dirname $0)/utils.sh"

# Generate artifacts for release
mk_artifacts() {
RUSTFLAGS="-C target-feature=+ssse3" \
cargo build --target $TARGET --release --features simd-accel
cargo build --target "$TARGET" --release --features unstable
}

mk_tarball() {
# create a "staging" directory
local td=$(mktempd)
local out_dir=$(pwd)
# When cross-compiling, use the right `strip` tool on the binary.
local gcc_prefix="$(gcc_prefix)"
# Create a temporary dir that contains our staging area.
# $tmpdir/$name is what eventually ends up as the deployed archive.
local tmpdir="$(mktemp -d)"
local name="${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}"
mkdir "$td/$name"
mkdir "$td/$name/complete"
local staging="$tmpdir/$name"
mkdir -p "$staging"/{complete,doc}
# The deployment directory is where the final archive will reside.
# This path is known by the .travis.yml configuration.
local out_dir="$(pwd)/deployment"
mkdir -p "$out_dir"
# Find the correct (most recent) Cargo "out" directory. The out directory
# contains shell completion files and the man page.
local cargo_out_dir="$(cargo_out_dir "target/$TARGET")"

cp target/$TARGET/release/rg "$td/$name/"
cp {doc/rg.1,README.md,UNLICENSE,COPYING,LICENSE-MIT} "$td/$name/"
cp target/$TARGET/release/build/ripgrep-*/out/{_rg,rg.bash-completion,rg.fish,_rg.ps1} "$td/$name/complete/"
# Copy the ripgrep binary and strip it.
cp "target/$TARGET/release/rg" "$staging/rg"
"${gcc_prefix}strip" "$staging/rg"
# Copy the licenses and README.
cp {README.md,UNLICENSE,COPYING,LICENSE-MIT} "$staging/"
# Copy documentation and man page.
cp {CHANGELOG.md,FAQ.md,GUIDE.md} "$staging/doc/"
if command -V a2x 2>&1 > /dev/null; then
# The man page should only exist if we have asciidoc installed.
cp "$cargo_out_dir/rg.1" "$staging/doc/"
fi
# Copy shell completion files.
cp "$cargo_out_dir"/{rg.bash,rg.fish,_rg.ps1} "$staging/complete/"
cp complete/_rg "$staging/complete/"

pushd $td
tar czf "$out_dir/$name.tar.gz" *
popd
rm -r $td
(cd "$tmpdir" && tar czf "$out_dir/$name.tar.gz" "$name")
rm -rf "$tmpdir"
}

main() {

55
ci/install.sh
Normal file → Executable file
@@ -1,60 +1,61 @@
# `install` phase: install stuff needed for the `script` phase
#!/bin/bash

# install stuff needed for the `script` phase

# Where rustup gets installed.
export PATH="$PATH:$HOME/.cargo/bin"

set -ex

. $(dirname $0)/utils.sh

install_c_toolchain() {
case $TARGET in
aarch64-unknown-linux-gnu)
sudo apt-get install -y --no-install-recommends \
gcc-aarch64-linux-gnu libc6-arm64-cross libc6-dev-arm64-cross
;;
*)
# For other targets, this is handled by addons.apt.packages in .travis.yml
;;
esac
}
. "$(dirname $0)/utils.sh"

install_rustup() {
# uninstall the rust toolchain installed by travis, we are going to use rustup
sh ~/rust/lib/rustlib/uninstall.sh

curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain=$TRAVIS_RUST_VERSION

curl https://sh.rustup.rs -sSf \
| sh -s -- -y --default-toolchain="$TRAVIS_RUST_VERSION"
rustc -V
cargo -V
}

install_standard_crates() {
install_targets() {
if [ $(host) != "$TARGET" ]; then
rustup target add $TARGET
fi
}

install_osx_dependencies() {
if ! is_osx; then
return
fi

brew install asciidoc docbook-xsl
}

configure_cargo() {
local prefix=$(gcc_prefix)
if [ -n "${prefix}" ]; then
local gcc_suffix=
if [ -n "$GCC_VERSION" ]; then
gcc_suffix="-$GCC_VERSION"
fi
local gcc="${prefix}gcc${gcc_suffix}"

if [ ! -z $prefix ]; then
# information about the cross compiler
${prefix}gcc -v
"${gcc}" -v

# tell cargo which linker to use for cross compilation
mkdir -p .cargo
cat >>.cargo/config <<EOF
[target.$TARGET]
linker = "${prefix}gcc"
linker = "${gcc}"
EOF
fi
}

main() {
install_c_toolchain
install_osx_dependencies
install_rustup
install_standard_crates
install_targets
configure_cargo

# TODO if you need to install extra stuff add it here
}

main

70
ci/script.sh
Normal file → Executable file
@@ -1,40 +1,46 @@
# `script` phase: you usually build, test and generate docs in this phase
#!/bin/bash

# build, test and generate docs in this phase

set -ex

. $(dirname $0)/utils.sh

# NOTE Workaround for rust-lang/rust#31907 - disable doc tests when cross compiling
# This has been fixed in the nightly channel but it would take a while to reach the other channels
disable_cross_doctests() {
if [ $(host) != "$TARGET" ] && [ "$TRAVIS_RUST_VERSION" = "stable" ]; then
if [ "$TRAVIS_OS_NAME" = "osx" ]; then
brew install gnu-sed --default-names
fi
find src -name '*.rs' -type f | xargs sed -i -e 's:\(//.\s*```\):\1 ignore,:g'
fi
}

run_test_suite() {
cargo clean --target $TARGET --verbose
cargo build --target $TARGET --verbose
cargo test --target $TARGET --verbose
cargo build --target $TARGET --verbose --manifest-path grep/Cargo.toml
cargo test --target $TARGET --verbose --manifest-path grep/Cargo.toml
cargo build --target $TARGET --verbose --manifest-path globset/Cargo.toml
cargo test --target $TARGET --verbose --manifest-path globset/Cargo.toml
cargo build --target $TARGET --verbose --manifest-path ignore/Cargo.toml
cargo test --target $TARGET --verbose --manifest-path ignore/Cargo.toml
cargo build --target $TARGET --verbose --manifest-path termcolor/Cargo.toml
cargo test --target $TARGET --verbose --manifest-path termcolor/Cargo.toml

# sanity check the file type
file target/$TARGET/debug/rg
}
. "$(dirname $0)/utils.sh"

main() {
# disable_cross_doctests
run_test_suite
# Test a normal debug build.
cargo build --target "$TARGET" --verbose --all

# Show the output of the most recent build.rs stderr.
set +x
stderr="$(find "target/$TARGET/debug" -name stderr -print0 | xargs -0 ls -t | head -n1)"
if [ -s "$stderr" ]; then
echo "===== $stderr ====="
cat "$stderr"
echo "====="
fi
set -x

# sanity check the file type
file target/"$TARGET"/debug/rg

# Check that we've generated man page and other shell completions.
outdir="$(cargo_out_dir "target/$TARGET/debug")"
file "$outdir/rg.bash"
file "$outdir/rg.fish"
file "$outdir/_rg.ps1"
file "$outdir/rg.1"

# Apparently tests don't work on arm, so just bail now. I guess we provide
# ARM releases on a best effort basis?
if is_arm; then
return 0
fi

# Test that zsh completions are in sync with ripgrep's actual args.
"$(dirname "${0}")/test_complete.sh"

# Run tests for ripgrep and all sub-crates.
cargo test --target "$TARGET" --verbose --all
}

main

0
ci/sha256.sh
Normal file → Executable file
94
ci/test_complete.sh
Executable file
@@ -0,0 +1,94 @@
|
||||
#!/usr/bin/env zsh
|
||||
|
||||
##
|
||||
# Compares options in `rg --help` output to options in zsh completion function
|
||||
|
||||
emulate -R zsh
|
||||
setopt extended_glob
|
||||
setopt no_function_argzero
|
||||
setopt no_unset
|
||||
|
||||
get_comp_args() {
|
||||
setopt local_options unset
|
||||
|
||||
# Our completion function recognises a special variable which tells it to
|
||||
# dump the _arguments specs and then just return. But do this in a sub-shell
|
||||
# anyway to avoid any weirdness
|
||||
( _RG_COMPLETE_LIST_ARGS=1 source $1 )
|
||||
return $?
|
||||
}
|
||||
|
||||
main() {
|
||||
local diff
|
||||
local rg="${${0:a}:h}/../target/${TARGET:-}/release/rg"
|
||||
local _rg="${${0:a}:h}/../complete/_rg"
|
||||
local -a help_args comp_args
|
||||
|
||||
[[ -e $rg ]] || rg=${rg/%\/release\/rg/\/debug\/rg}
|
||||
|
||||
[[ -e $rg ]] || {
|
||||
printf >&2 'File not found: %s\n' $rg
|
||||
return 1
|
||||
}
|
||||
[[ -e $_rg ]] || {
|
||||
printf >&2 'File not found: %s\n' $_rg
|
||||
return 1
|
||||
}
|
||||
|
||||
printf 'Comparing options:\n-%s\n+%s\n' $rg $_rg
|
||||
|
||||
# 'Parse' options out of the `--help` output. To prevent false positives we
|
||||
# only look at lines where the first non-white-space character is `-`
|
||||
help_args=( ${(f)"$(
|
||||
$rg --help |
|
||||
$rg -- '^\s*-' |
|
||||
$rg -io -- '[\t ,](-[a-z0-9]|--[a-z0-9-]+)\b' |
|
||||
tr -d '\t ,' |
|
||||
sort -u
|
||||
)"} )
|
||||
|
||||
# 'Parse' options out of the completion function
|
||||
comp_args=( ${(f)"$( get_comp_args $_rg )"} )
|
||||
|
||||
comp_args=( ${comp_args#\(*\)} ) # Strip excluded options
|
||||
comp_args=( ${comp_args#\*} ) # Strip repetition indicator
|
||||
comp_args=( ${comp_args%%-[:[]*} ) # Strip everything after -optname-
|
||||
comp_args=( ${comp_args%%[:+=[]*} ) # Strip everything after other optspecs
|
||||
comp_args=( ${comp_args##[^-]*} ) # Remove non-options
|
||||
|
||||
# This probably isn't necessary, but we should ensure the same order
|
||||
comp_args=( ${(f)"$( printf '%s\n' $comp_args | sort -u )"} )
|
||||
|
||||
(( $#help_args )) || {
|
||||
printf >&2 'Failed to get help_args\n'
|
||||
return 1
|
||||
}
|
||||
(( $#comp_args )) || {
|
||||
printf >&2 'Failed to get comp_args\n'
|
||||
return 1
|
||||
}
|
||||
|
||||
diff="$(
|
||||
if diff --help 2>&1 | grep -qF -- '--label'; then
|
||||
diff -U2 \
|
||||
--label '`rg --help`' \
|
||||
--label '`_rg`' \
|
||||
=( printf '%s\n' $help_args ) =( printf '%s\n' $comp_args )
|
||||
else
|
||||
diff -U2 \
|
||||
-L '`rg --help`' \
|
||||
-L '`_rg`' \
|
||||
=( printf '%s\n' $help_args ) =( printf '%s\n' $comp_args )
|
||||
fi
|
||||
)"
|
||||
|
||||
(( $#diff )) && {
|
||||
printf >&2 '%s\n' 'zsh completion options differ from `--help` options:'
|
||||
printf >&2 '%s\n' $diff
|
||||
return 1
|
||||
}
|
||||
printf 'OK\n'
|
||||
return 0
|
||||
}
|
||||
|
||||
main "${@}"
|
||||
95
ci/utils.sh
@@ -1,5 +1,19 @@
|
||||
mktempd() {
|
||||
echo $(mktemp -d 2>/dev/null || mktemp -d -t tmp)
|
||||
#!/bin/bash
|
||||
|
||||
# Various utility functions used through CI.
|
||||
|
||||
# Finds Cargo's `OUT_DIR` directory from the most recent build.
|
||||
#
|
||||
# This requires one parameter corresponding to the target directory
|
||||
# to search for the build output.
|
||||
cargo_out_dir() {
|
||||
# This works by finding the most recent stamp file, which is produced by
|
||||
# every ripgrep build.
|
||||
target_dir="$1"
|
||||
find "$target_dir" -name ripgrep-stamp -print0 \
|
||||
| xargs -0 ls -t \
|
||||
| head -n1 \
|
||||
| xargs dirname
|
||||
}
|
||||
|
||||
host() {
|
||||
@@ -13,37 +27,12 @@ host() {
|
||||
esac
|
||||
}
|
||||
|
||||
gcc_prefix() {
|
||||
case "$TARGET" in
|
||||
aarch64-unknown-linux-gnu)
|
||||
echo aarch64-linux-gnu-
|
||||
;;
|
||||
arm*-gnueabihf)
|
||||
echo arm-linux-gnueabihf-
|
||||
;;
|
||||
*)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
dobin() {
|
||||
[ -z $MAKE_DEB ] && die 'dobin: $MAKE_DEB not set'
|
||||
[ $# -lt 1 ] && die "dobin: at least one argument needed"
|
||||
|
||||
local f prefix=$(gcc_prefix)
|
||||
for f in "$@"; do
|
||||
install -m0755 $f $dtd/debian/usr/bin/
|
||||
${prefix}strip -s $dtd/debian/usr/bin/$(basename $f)
|
||||
done
|
||||
}
|
||||
|
||||
architecture() {
|
||||
case $1 in
|
||||
x86_64-unknown-linux-gnu|x86_64-unknown-linux-musl)
|
||||
case "$TARGET" in
|
||||
x86_64-*)
|
||||
echo amd64
|
||||
;;
|
||||
i686-unknown-linux-gnu|i686-unknown-linux-musl)
|
||||
i686-*|i586-*|i386-*)
|
||||
echo i386
|
||||
;;
|
||||
arm*-unknown-linux-gnueabihf)
|
||||
@@ -54,3 +43,49 @@ architecture() {
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
gcc_prefix() {
|
||||
case "$(architecture)" in
|
||||
armhf)
|
||||
echo arm-linux-gnueabihf-
|
||||
;;
|
||||
*)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_ssse3_target() {
|
||||
case "$(architecture)" in
|
||||
amd64) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_x86() {
|
||||
case "$(architecture)" in
|
||||
amd64|i386) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_arm() {
|
||||
case "$(architecture)" in
|
||||
armhf) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_linux() {
|
||||
case "$TRAVIS_OS_NAME" in
|
||||
linux) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_osx() {
|
||||
case "$TRAVIS_OS_NAME" in
|
||||
osx) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
8
compile
@@ -1,8 +0,0 @@
#!/bin/sh

# export RUSTFLAGS="-C target-feature=+ssse3"
# cargo build --release --features 'simd-accel'

export RUSTFLAGS="-C target-cpu=native"
cargo build --release --features 'simd-accel avx-accel'
# cargo build --release --features 'simd-accel avx-accel' --target x86_64-unknown-linux-musl

263
complete/_rg
Normal file
@@ -0,0 +1,263 @@
|
||||
#compdef rg
|
||||
|
||||
##
|
||||
# zsh completion function for ripgrep
|
||||
#
|
||||
# Run ci/test_complete.sh after building to ensure that the options supported by
|
||||
# this function stay in synch with the `rg` binary.
|
||||
#
|
||||
# @see https://github.com/zsh-users/zsh/blob/master/Etc/completion-style-guide
|
||||
#
|
||||
# Based on code from the zsh-users project — see copyright notice below.
|
||||
|
||||
_rg() {
|
||||
local state_descr ret curcontext="${curcontext:-}"
|
||||
local -a context line state
|
||||
local -A opt_args val_args
|
||||
local -a rg_args
|
||||
|
||||
# Sort by long option name to match `rg --help`
|
||||
rg_args=(
|
||||
'(-A -C --after-context --context)'{-A+,--after-context=}'[specify lines to show after each match]:number of lines'
|
||||
'(-B -C --before-context --context)'{-B+,--before-context=}'[specify lines to show before each match]:number of lines'
|
||||
'(-i -s -S --ignore-case --case-sensitive --smart-case)'{-s,--case-sensitive}'[search case-sensitively]'
|
||||
'--color=[specify when to use colors in output]:when:( never auto always ansi )'
|
||||
'*--colors=[specify color settings and styles]: :->colorspec'
|
||||
'--column[show column numbers]'
|
||||
'(-A -B -C --after-context --before-context --context)'{-C+,--context=}'[specify lines to show before and after each match]:number of lines'
|
||||
'(-b --byte-offset)'{-b,--byte-offset}'[print the 0-based byte offset for each matching line]'
|
||||
'--context-separator=[specify string used to separate non-continuous context lines in output]:separator'
|
||||
'(-c --count --count-matches --passthrough --passthru)'{-c,--count}'[only show count of matching lines for each file]'
|
||||
'(--count-matches -c --count --passthrough --passthru)--count-matches[only show count of individual matches for each file]'
|
||||
'--debug[show debug messages]'
|
||||
'--dfa-size-limit=[specify upper size limit of generated DFA]:DFA size'
|
||||
'(-E --encoding)'{-E+,--encoding=}'[specify text encoding of files to search]: :_rg_encodings'
|
||||
'*'{-f+,--file=}'[specify file containing patterns to search for]:file:_files'
|
||||
"(1)--files[show each file that would be searched (but don't search)]"
|
||||
'(-l --files-with-matches --files-without-match)'{-l,--files-with-matches}'[only show names of files with matches]'
|
||||
'(-l --files-with-matches --files-without-match)--files-without-match[only show names of files without matches]'
|
||||
'(-F --fixed-strings)'{-F,--fixed-strings}'[treat pattern as literal string instead of regular expression]'
|
||||
'(-L --follow)'{-L,--follow}'[follow symlinks]'
|
||||
'*'{-g+,--glob=}'[include or exclude files for searching that match the specified glob]:glob'
|
||||
'(: -)'{-h,--help}'[display help information]'
|
||||
'(-p --no-heading --pretty --vimgrep)--heading[show matches grouped by file name]'
|
||||
'--hidden[search hidden files and directories]'
|
||||
'*--iglob=[include or exclude files for searching that match the specified case-insensitive glob]:glob'
|
||||
'(-i -s -S --case-sensitive --ignore-case --smart-case)'{-i,--ignore-case}'[search case-insensitively]'
|
||||
'--ignore-file=[specify additional ignore file]:file:_files'
|
||||
'(-v --invert-match)'{-v,--invert-match}'[invert matching]'
|
||||
'(-n -N --line-number --no-line-number)'{-n,--line-number}'[show line numbers]'
|
||||
'(-N --no-line-number)--line-number-width=[specify width of displayed line number]:number of columns'
|
||||
'(-w -x --line-regexp --word-regexp)'{-x,--line-regexp}'[only show matches surrounded by line boundaries]'
|
||||
'(-M --max-columns)'{-M+,--max-columns=}'[specify max length of lines to print]:number of bytes'
|
||||
'(-m --max-count)'{-m+,--max-count=}'[specify max number of matches per file]:number of matches'
|
||||
'--max-filesize=[specify size above which files should be ignored]:file size'
|
||||
'--maxdepth=[specify max number of directories to descend]:number of directories'
|
||||
'(--mmap --no-mmap)--mmap[search using memory maps when possible]'
|
||||
'(-H --with-filename --no-filename)--no-filename[suppress all file names]'
|
||||
"(-p --heading --pretty --vimgrep)--no-heading[don't group matches by file name]"
|
||||
"--no-config[don't load configuration files]"
|
||||
"(--no-ignore-parent)--no-ignore[don't respect ignore files]"
|
||||
"--no-ignore-parent[don't respect ignore files in parent directories]"
|
||||
"--no-ignore-vcs[don't respect version control ignore files]"
|
||||
'(-n -N --line-number --no-line-number)'{-N,--no-line-number}'[suppress line numbers]'
|
||||
'--no-messages[suppress all error messages]'
|
||||
"(--mmap --no-mmap)--no-mmap[don't search using memory maps]"
|
||||
'(-0 --null)'{-0,--null}'[print NUL byte after file names]'
|
||||
'(-o -r --only-matching --passthrough --passthru --replace)'{-o,--only-matching}'[show only matching part of each line]'
|
||||
'(-c -o -r --count --only-matching --passthrough --replace)--passthru[show both matching and non-matching lines]'
|
||||
'!(-c -o -r --count --only-matching --passthru --replace)--passthrough'
|
||||
'--path-separator=[specify path separator to use when printing file names]:separator'
|
||||
'(-p --heading --no-heading --pretty --vimgrep)'{-p,--pretty}'[alias for --color=always --heading -n]'
|
||||
'(-q --quiet)'{-q,--quiet}'[suppress normal output]'
|
||||
'--regex-size-limit=[specify upper size limit of compiled regex]:regex size'
|
||||
'(1 -f --file)*'{-e+,--regexp=}'[specify pattern]:pattern'
|
||||
'(-c -o -r --count --only-matching --passthrough --passthru --replace)'{-r+,--replace=}'[specify string used to replace matches]:replace string'
|
||||
'(-i -s -S --ignore-case --case-sensitive --smart-case)'{-S,--smart-case}'[search case-insensitively if the pattern is all lowercase]'
|
||||
'(-j --threads)--sort-files[sort results by file path (disables parallelism)]'
|
||||
'(-a --text)'{-a,--text}'[search binary files as if they were text]'
|
||||
'(-j --sort-files --threads)'{-j+,--threads=}'[specify approximate number of threads to use]:number of threads'
|
||||
'*'{-t+,--type=}'[only search files matching specified type]: :_rg_types'
|
||||
'*--type-add=[add new glob for file type]: :->typespec'
|
||||
'*--type-clear=[clear globs previously defined for specified file type]: :_rg_types'
|
||||
# This should actually be exclusive with everything but other type options
|
||||
'(:)--type-list[show all supported file types and their associated globs]'
|
||||
'*'{-T+,--type-not=}"[don't search files matching specified type]: :_rg_types"
|
||||
'*'{-u,--unrestricted}'[reduce level of "smart" searching]'
|
||||
'(: -)'{-V,--version}'[display version information]'
|
||||
'(-p --heading --no-heading --pretty)--vimgrep[show results in vim-compatible format]'
|
||||
'(-H --no-filename --with-filename)'{-H,--with-filename}'[display the file name for matches]'
|
||||
'(-w -x --line-regexp --word-regexp)'{-w,--word-regexp}'[only show matches surrounded by word boundaries]'
|
||||
'(-e -f --file --files --regexp --type-list)1: :_rg_pattern'
|
||||
'(--type-list)*:file:_files'
|
||||
'(-z --search-zip)'{-z,--search-zip}'[search in compressed files]'
|
||||
"(--stats)--stats[print stats about this search]"
|
||||
)
|
||||
|
||||
[[ ${_RG_COMPLETE_LIST_ARGS:-} == (1|t*|y*) ]] && {
|
||||
printf '%s\n' "${rg_args[@]}"
|
||||
return 0
|
||||
}
|
||||
|
||||
_arguments -s -S : "${rg_args[@]}" && return 0
|
||||
|
||||
while (( $#state )); do
|
||||
case "${state[1]}" in
|
||||
colorspec)
|
||||
# @todo I don't like this because it allows you to do weird things like
|
||||
# `line:line:bg:`. Also, i would like the `compadd -q` behaviour
|
||||
[[ -prefix *:none: ]] && return 1
|
||||
[[ -prefix *:*:*:* ]] && return 1
|
||||
|
||||
_values -S ':' 'color/style type' \
|
||||
'column[specify coloring for column numbers]: :->attribute' \
|
||||
'line[specify coloring for line numbers]: :->attribute' \
|
||||
'match[specify coloring for match text]: :->attribute' \
|
||||
'path[specify color for file names]: :->attribute' && return 0
|
||||
|
||||
[[ "${state}" == 'attribute' ]] &&
|
||||
_values -S ':' 'color/style attribute' \
|
||||
'none[clear color/style for type]' \
|
||||
'bg[specify background color]: :->color' \
|
||||
'fg[specify foreground color]: :->color' \
|
||||
'style[specify text style]: :->style' && return 0
|
||||
|
||||
[[ "${state}" == 'color' ]] &&
|
||||
_values -S ':' 'color value' \
|
||||
black blue green red cyan magenta yellow white && return 0
|
||||
|
||||
[[ "${state}" == 'style' ]] &&
|
||||
_values -S ':' 'style value' \
|
||||
bold nobold intense nointense underline nounderline && return 0
|
||||
;;
|
||||
|
||||
typespec)
|
||||
if compset -P '[^:]##:include:'; then
|
||||
_sequence -s ',' _rg_types && return 0
|
||||
# @todo This bit in particular could be better, but it's a little
|
||||
# complex, and attempting to solve it seems to run us up against a crash
|
||||
# bug — zsh # 40362
|
||||
elif compset -P '[^:]##:'; then
|
||||
_message 'glob or include directive' && return 1
|
||||
elif [[ ! -prefix *:* ]]; then
|
||||
_rg_types -qS ':' && return 0
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
shift state
|
||||
done
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
# zsh 5.1 refuses to complete options if a 'match-less' operand like our pattern
|
||||
# could be 'completed' instead. We can use _guard() to avoid this problem, but
|
||||
# it introduces another one: zsh won't print the message if we try to complete
|
||||
# the pattern after having passed `--`. To work around *that* problem, we can
|
||||
# use this function to bypass the _guard() when `--` is on the command line.
|
||||
# This is inaccurate (it'd get confused by e.g. `rg -e --`), but zsh's handling
|
||||
# of `--` isn't accurate anyway
|
||||
_rg_pattern() {
|
||||
if (( ${words[(I)--]} )); then
|
||||
_message 'pattern'
|
||||
else
|
||||
_guard '^-*' 'pattern'
|
||||
fi
|
||||
}
|
||||
|
||||
# Complete encodings
|
||||
_rg_encodings() {
|
||||
local -a expl
|
||||
local -aU _encodings
|
||||
|
||||
# This is impossible to read, but these encodings rarely if ever change, so it
|
||||
# probably doesn't matter. They are derived from the list given here:
|
||||
# https://encoding.spec.whatwg.org/#concept-encoding-get
|
||||
_encodings=(
|
||||
{{,us-}ascii,arabic,chinese,cyrillic,greek{,8},hebrew,korean}
|
||||
logical visual mac {,cs}macintosh x-mac-{cyrillic,roman,ukrainian}
|
||||
866 ibm{819,866} csibm866
|
||||
big5{,-hkscs} {cn-,cs}big5 x-x-big5
|
||||
cp{819,866,125{0..8}} x-cp125{0..8}
|
||||
csiso2022{jp,kr} csiso8859{6,8}{e,i}
|
||||
csisolatin{{1..6},9} csisolatin{arabic,cyrillic,greek,hebrew}
|
||||
ecma-{114,118} asmo-708 elot_928 sun_eu_greek
|
||||
euc-{jp,kr} x-euc-jp cseuckr cseucpkdfmtjapanese
|
||||
{,x-}gbk csiso58gb231280 gb18030 {,cs}gb2312 gb_2312{,-80} hz-gb-2312
|
||||
iso-2022-{cn,cn-ext,jp,kr}
|
||||
iso8859{,-}{{1..11},13,14,15}
|
||||
iso-8859-{{1..11},{6,8}-{e,i},13,14,15,16} iso_8859-{{1..9},15}
|
||||
iso_8859-{1,2,6,7}:1987 iso_8859-{3,4,5,8}:1988 iso_8859-9:1989
|
||||
iso-ir-{58,100,101,109,110,126,127,138,144,148,149,157}
|
||||
koi{,8,8-r,8-ru,8-u,8_r} cskoi8r
|
||||
ks_c_5601-{1987,1989} ksc{,_}5691 csksc56011987
|
||||
latin{1..6} l{{1..6},9}
|
||||
shift{-,_}jis csshiftjis {,x-}sjis ms_kanji ms932
|
||||
utf{,-}8 utf-16{,be,le} unicode-1-1-utf-8
|
||||
windows-{31j,874,949,125{0..8}} dos-874 tis-620 ansi_x3.4-1968
|
||||
x-user-defined auto
|
||||
)
|
||||
|
||||
_wanted rg-encodings expl 'encoding' compadd -a "${@}" - _encodings
|
||||
}
|
||||
|
||||
# Complete file types
|
||||
_rg_types() {
|
||||
local -a expl
|
||||
local -aU _types
|
||||
|
||||
_types=( ${${(f)"$( _call_program rg-types rg --type-list )"}%%:*} )
|
||||
|
||||
_wanted rg-types expl 'file type' compadd -a "${@}" - _types
|
||||
}
|
||||
|
||||
_rg "${@}"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Copyright (c) 2011 Github zsh-users - http://github.com/zsh-users
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution.
|
||||
# * Neither the name of the zsh-users nor the
|
||||
# names of its contributors may be used to endorse or promote products
|
||||
# derived from this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL ZSH-USERS BE LIABLE FOR ANY
|
||||
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
# ------------------------------------------------------------------------------
|
||||
# Description
|
||||
# -----------
|
||||
#
|
||||
# Completion script for ripgrep
|
||||
#
|
||||
# ------------------------------------------------------------------------------
|
||||
# Authors
|
||||
# -------
|
||||
#
|
||||
# * arcizan <ghostrevery@gmail.com>
|
||||
# * MaskRay <i@maskray.me>
|
||||
#
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Local Variables:
|
||||
# mode: shell-script
|
||||
# coding: utf-8-unix
|
||||
# indent-tabs-mode: nil
|
||||
# sh-indentation: 2
|
||||
# sh-basic-offset: 2
|
||||
# End:
|
||||
# vim: ft=zsh sw=2 ts=2 et
|
||||
@@ -1,4 +0,0 @@
#!/bin/sh

pandoc -s -t man rg.1.md -o rg.1
sed -i 's/\.TH.*/.TH "rg" "1"/g' rg.1

507
doc/rg.1
@@ -1,507 +0,0 @@
|
||||
.\" Automatically generated by Pandoc 1.19.2.1
|
||||
.\"
|
||||
.TH "rg" "1"
|
||||
.hy
|
||||
.SH NAME
|
||||
.PP
|
||||
rg \- recursively search current directory for lines matching a pattern
|
||||
.SH SYNOPSIS
|
||||
.PP
|
||||
rg [\f[I]options\f[]] <\f[I]pattern\f[]> [\f[I]<\f[]path\f[I]> ...\f[]]
|
||||
.PP
|
||||
rg [\f[I]options\f[]] (\-e PATTERN | \-f FILE) ...
|
||||
[\f[I]<\f[]path\f[I]> ...\f[]]
|
||||
.PP
|
||||
rg [\f[I]options\f[]] \-\-files [\f[I]<\f[]path\f[I]> ...\f[]]
|
||||
.PP
|
||||
rg [\f[I]options\f[]] \-\-type\-list
|
||||
.PP
|
||||
rg [\f[I]options\f[]] \-\-help
|
||||
.PP
|
||||
rg [\f[I]options\f[]] \-\-version
|
||||
.SH DESCRIPTION
|
||||
.PP
|
||||
ripgrep (rg) combines the usability of The Silver Searcher (an ack
|
||||
clone) with the raw speed of grep.
|
||||
.PP
|
||||
ripgrep\[aq]s regex engine uses finite automata and guarantees linear
|
||||
time searching.
|
||||
Because of this, features like backreferences and arbitrary lookaround
|
||||
are not supported.
|
||||
.PP
|
||||
Project home page: https://github.com/BurntSushi/ripgrep
|
||||
.SH COMMON OPTIONS
|
||||
.TP
|
||||
.B \-a, \-\-text
|
||||
Search binary files as if they were text.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-c, \-\-count
|
||||
Only show count of line matches for each file.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-color \f[I]WHEN\f[]
|
||||
Whether to use coloring in match.
|
||||
Valid values are never, always or auto.
|
||||
[default: auto]
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-e, \-\-regexp \f[I]PATTERN\f[] ...
|
||||
Use PATTERN to search.
|
||||
This option can be provided multiple times, where all patterns given are
|
||||
searched.
|
||||
This is also useful when searching for patterns that start with a dash.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-F, \-\-fixed\-strings
|
||||
Treat the pattern as a literal string instead of a regular expression.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-g, \-\-glob \f[I]GLOB\f[] ...
|
||||
Include or exclude files for searching that match the given glob.
|
||||
This always overrides any other ignore logic if there is a conflict, but
|
||||
is otherwise applied in addition to ignore files (e.g., .gitignore or
|
||||
\&.ignore).
|
||||
Multiple glob flags may be used.
|
||||
Globbing rules match .gitignore globs.
|
||||
Precede a glob with a \[aq]!\[aq] to exclude it.
|
||||
.RS
|
||||
.RE
|
||||
.PP
|
||||
The \-\-glob flag subsumes the functionality of both the \-\-include and
|
||||
\-\-exclude flags commonly found in other tools.
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
Values\ given\ to\ \-g\ must\ be\ quoted\ or\ your\ shell\ will\ expand\ them\ and\ result
|
||||
in\ unexpected\ behavior.
|
||||
|
||||
Combine\ with\ the\ \-\-files\ flag\ to\ return\ matched\ filenames
|
||||
(i.e.,\ to\ replicate\ ack/ag\[aq]s\ \-g\ flag).
|
||||
|
||||
For\ example:\ rg\ \-g\ \[aq]\\<glob\\>\[aq]\ \-\-files
|
||||
\f[]
|
||||
.fi
|
||||
.TP
|
||||
.B \-h, \-\-help
|
||||
Show this usage message.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-i, \-\-ignore\-case
|
||||
Case insensitive search.
|
||||
Overridden by \-\-case\-sensitive.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-n, \-\-line\-number
|
||||
Show line numbers (1\-based).
|
||||
This is enabled by default at a tty.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-N, \-\-no\-line\-number
|
||||
Suppress line numbers.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-q, \-\-quiet
|
||||
Do not print anything to stdout.
|
||||
If a match is found in a file, stop searching that file.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-t, \-\-type \f[I]TYPE\f[] ...
|
||||
Only search files matching TYPE.
|
||||
Multiple type flags may be provided.
|
||||
Use the \-\-type\-list flag to list all available types.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-T, \-\-type\-not \f[I]TYPE\f[] ...
|
||||
Do not search files matching TYPE.
|
||||
Multiple not\-type flags may be provided.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-u, \-\-unrestricted ...
|
||||
Reduce the level of \[aq]smart\[aq] searching.
|
||||
A single \-u doesn\[aq]t respect .gitignore (etc.) files.
|
||||
Two \-u flags will search hidden files and directories.
|
||||
Three \-u flags will search binary files.
|
||||
\-uu is equivalent to grep \-r, and \-uuu is equivalent to grep \-a \-r.
|
||||
.RS
|
||||
.PP
|
||||
Note that the \-u flags are convenient aliases for other combinations of
|
||||
flags.
|
||||
\-u aliases \[aq]\-\-no\-ignore\[aq].
|
||||
\-uu aliases \[aq]\-\-no\-ignore \-\-hidden\[aq].
|
||||
\-uuu aliases \[aq]\-\-no\-ignore \-\-hidden \-\-text\[aq].
|
||||
.RE
|
||||
.TP
|
||||
.B \-v, \-\-invert\-match
|
||||
Invert matching.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-w, \-\-word\-regexp
|
||||
Only show matches surrounded by word boundaries.
|
||||
This is equivalent to putting \\b before and after the search pattern.
|
||||
.RS
|
||||
.RE
|
||||
.SH LESS COMMON OPTIONS
|
||||
.TP
|
||||
.B \-A, \-\-after\-context \f[I]NUM\f[]
|
||||
Show NUM lines after each match.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-B, \-\-before\-context \f[I]NUM\f[]
|
||||
Show NUM lines before each match.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-C, \-\-context \f[I]NUM\f[]
|
||||
Show NUM lines before and after each match.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-colors \f[I]SPEC\f[] ...
|
||||
This flag specifies color settings for use in the output.
|
||||
This flag may be provided multiple times.
|
||||
Settings are applied iteratively.
|
||||
Colors are limited to one of eight choices: red, blue, green, cyan,
|
||||
magenta, yellow, white and black.
|
||||
Styles are limited to nobold, bold, nointense or intense.
|
||||
.RS
|
||||
.PP
|
||||
The format of the flag is {type}:{attribute}:{value}.
|
||||
{type} should be one of path, line or match.
|
||||
{attribute} can be fg, bg or style.
|
||||
Value is either a color (for fg and bg) or a text style.
|
||||
A special format, {type}:none, will clear all color settings for {type}.
|
||||
.PP
|
||||
For example, the following command will change the match color to
|
||||
magenta and the background color for line numbers to yellow:
|
||||
.PP
|
||||
rg \-\-colors \[aq]match:fg:magenta\[aq] \-\-colors
|
||||
\[aq]line:bg:yellow\[aq] foo.
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-column
|
||||
Show column numbers (1 based) in output.
|
||||
This only shows the column numbers for the first match on each line.
|
||||
Note that this doesn\[aq]t try to account for Unicode.
|
||||
One byte is equal to one column.
|
||||
This implies \-\-line\-number.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-context\-separator \f[I]SEPARATOR\f[]
|
||||
The string to use when separating non\-continuous context lines.
|
||||
Escape sequences may be used.
|
||||
[default: \-\-]
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-debug
|
||||
Show debug messages.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-E, \-\-encoding \f[I]ENCODING\f[]
|
||||
Specify the text encoding that ripgrep will use on all files searched.
|
||||
The default value is \[aq]auto\[aq], which will cause ripgrep to do a
|
||||
best effort automatic detection of encoding on a per\-file basis.
|
||||
Other supported values can be found in the list of labels here:
|
||||
https://encoding.spec.whatwg.org/#concept\-encoding\-get
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-f, \-\-file FILE ...
|
||||
Search for patterns from the given file, with one pattern per line.
|
||||
When this flag is used or multiple times or in combination with the
|
||||
\-e/\-\-regexp flag, then all patterns provided are searched.
|
||||
Empty pattern lines will match all input lines, and the newline is not
|
||||
counted as part of the pattern.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-files
|
||||
Print each file that would be searched (but don\[aq]t search).
|
||||
.RS
|
||||
.PP
|
||||
Combine with the \-g flag to return matched paths, for example:
|
||||
.PP
|
||||
rg \-g \[aq]<glob>\[aq] \-\-files
|
||||
.RE
|
||||
.TP
|
||||
.B \-l, \-\-files\-with\-matches
|
||||
Only show path of each file with matches.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-files\-without\-match
|
||||
Only show path of each file with no matches.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-H, \-\-with\-filename
|
||||
Prefix each match with the file name that contains it.
|
||||
This is the default when more than one file is searched.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-filename
|
||||
Never show the filename for a match.
|
||||
This is the default when one file is searched.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-heading
|
||||
Show the file name above clusters of matches from each file instead of
|
||||
showing the file name for every match.
|
||||
This is the default mode at a tty.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-heading
|
||||
Don\[aq]t group matches by each file.
|
||||
If \-H/\-\-with\-filename is enabled, then file names will be shown for
|
||||
every line matched.
|
||||
This is the default more when not at a tty.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-hidden
|
||||
Search hidden directories and files.
|
||||
(Hidden directories and files are skipped by default.)
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-ignore\-file FILE ...
|
||||
Specify additional ignore files for filtering file paths.
|
||||
Ignore files should be in the gitignore format and are matched relative
|
||||
to the current working directory.
|
||||
These ignore files have lower precedence than all other ignore files.
|
||||
When specifying multiple ignore files, earlier files have lower
|
||||
precedence than later files.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-L, \-\-follow
|
||||
Follow symlinks.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-m, \-\-max\-count \f[I]NUM\f[]
|
||||
Limit the number of matching lines per file searched to NUM.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-max\-filesize \f[I]NUM\f[]+\f[I]SUFFIX\f[]?
|
||||
Ignore files larger than \f[I]NUM\f[] in size.
|
||||
Directories will never be ignored.
|
||||
.RS
|
||||
.PP
|
||||
\f[I]SUFFIX\f[] is optional and may be one of K, M or G.
|
||||
These correspond to kilobytes, megabytes and gigabytes respectively.
|
||||
If omitted the input is treated as bytes.
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-maxdepth \f[I]NUM\f[]
|
||||
Descend at most NUM directories below the command line arguments.
|
||||
A value of zero searches only the starting\-points themselves.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-mmap
|
||||
Search using memory maps when possible.
|
||||
This is enabled by default when ripgrep thinks it will be faster.
|
||||
(Note that mmap searching doesn\[aq]t currently support the various
|
||||
context related options.)
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-messages
|
||||
Suppress all error messages.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-mmap
|
||||
Never use memory maps, even when they might be faster.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-ignore
|
||||
Don\[aq]t respect ignore files (.gitignore, .ignore, etc.) This implies
|
||||
\-\-no\-ignore\-parent.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-ignore\-parent
|
||||
Don\[aq]t respect ignore files in parent directories.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-ignore\-vcs
|
||||
Don\[aq]t respect version control ignore files (e.g., .gitignore).
|
||||
Note that .ignore files will continue to be respected.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-null
|
||||
Whenever a file name is printed, follow it with a NUL byte.
|
||||
This includes printing filenames before matches, and when printing a
|
||||
list of matching files such as with \-\-count, \-\-files\-with\-matches
|
||||
and \-\-files.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-path\-separator \f[I]SEPARATOR\f[]
|
||||
The path separator to use when printing file paths.
|
||||
This defaults to your platform\[aq]s path separator, which is / on Unix
|
||||
and \\ on Windows.
|
||||
This flag is intended for overriding the default when the environment
|
||||
demands it (e.g., cygwin).
|
||||
A path separator is limited to a single byte.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-p, \-\-pretty
|
||||
Alias for \-\-color=always \-\-heading \-n.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-r, \-\-replace \f[I]ARG\f[]
|
||||
Replace every match with the string given when printing search results.
|
||||
Neither this flag nor any other flag will modify your files.
|
||||
.RS
|
||||
.PP
|
||||
Capture group indices (e.g., $5) and names (e.g., $foo) are supported in
|
||||
the replacement string.
|
||||
.PP
|
||||
Note that the replacement by default replaces each match, and NOT the
|
||||
entire line.
|
||||
To replace the entire line, you should match the entire line.
|
||||
For example, to emit only the first phone numbers in each line:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
rg\ \[aq]^.*([0\-9]{3}\-[0\-9]{3}\-[0\-9]{4}).*$\[aq]\ \-\-replace\ \[aq]$1\[aq]
|
||||
\f[]
|
||||
.fi
|
||||
.RE
|
||||
.TP
|
||||
.B \-s, \-\-case\-sensitive
|
||||
Search case sensitively.
|
||||
This overrides \-\-ignore\-case and \-\-smart\-case.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-S, \-\-smart\-case
|
||||
Search case insensitively if the pattern is all lowercase.
|
||||
Search case sensitively otherwise.
|
||||
This is overridden by either \-\-case\-sensitive or \-\-ignore\-case.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-sort\-files
|
||||
Sort results by file path.
|
||||
Note that this currently disables all parallelism and runs search in a
|
||||
single thread.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-j, \-\-threads \f[I]ARG\f[]
|
||||
The number of threads to use.
|
||||
0 means use the number of logical CPUs (capped at 12).
|
||||
[default: 0]
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-version
|
||||
Show the version number of ripgrep and exit.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-vimgrep
|
||||
Show results with every match on its own line, including line numbers
|
||||
and column numbers.
|
||||
(With this option, a line with more than one match of the regex will be
|
||||
printed more than once.)
|
||||
.RS
|
||||
.RE
|
||||
.SH FILE TYPE MANAGEMENT OPTIONS
|
||||
.TP
|
||||
.B \-\-type\-list
|
||||
Show all supported file types and their associated globs.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-type\-add \f[I]ARG\f[] ...
|
||||
Add a new glob for a particular file type.
|
||||
Only one glob can be added at a time.
|
||||
Multiple \-\-type\-add flags can be provided.
|
||||
Unless \-\-type\-clear is used, globs are added to any existing globs
|
||||
inside of ripgrep.
|
||||
Note that this must be passed to every invocation of rg.
|
||||
Type settings are NOT persisted.
|
||||
.RS
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
\ \ Example:\ `rg\ \-\-type\-add\ \[aq]foo:*.foo\[aq]\ \-tfoo\ PATTERN`
|
||||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
\-\-type\-add can also be used to include rules from other types with
|
||||
the special include directive.
|
||||
The include directive permits specifying one or more other type names
|
||||
(separated by a comma) that have been defined and its rules will
|
||||
automatically be imported into the type specified.
|
||||
For example, to create a type called src that matches C++, Python and
|
||||
Markdown files, one can use:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
\ \ `\-\-type\-add\ \[aq]src:include:cpp,py,md\[aq]`
|
||||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
Additional glob rules can still be added to the src type by using the
|
||||
\-\-type\-add flag again:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
\ \ `\-\-type\-add\ \[aq]src:include:cpp,py,md\[aq]\ \-\-type\-add\ \[aq]src:*.foo\[aq]`
|
||||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
Note that type names must consist only of Unicode letters or numbers.
|
||||
Punctuation characters are not allowed.
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-type\-clear \f[I]TYPE\f[] ...
|
||||
Clear the file type globs previously defined for TYPE.
|
||||
This only clears the default type definitions that are found inside of
|
||||
ripgrep.
|
||||
Note that this must be passed to every invocation of rg.
|
||||
.RS
|
||||
.RE
|
||||
.SH SHELL COMPLETION
|
||||
.PP
|
||||
Shell completion files are included in the release tarball for Bash,
|
||||
Fish, Zsh and PowerShell.
|
||||
.PP
|
||||
For \f[B]bash\f[], move \f[C]rg.bash\-completion\f[] to
|
||||
\f[C]$XDG_CONFIG_HOME/bash_completion\f[] or
|
||||
\f[C]/etc/bash_completion.d/\f[].
|
||||
.PP
|
||||
For \f[B]fish\f[], move \f[C]rg.fish\f[] to
|
||||
\f[C]$HOME/.config/fish/completions\f[].
|
||||
343
doc/rg.1.md
@@ -1,343 +0,0 @@
|
||||
# NAME
|
||||
|
||||
rg - recursively search current directory for lines matching a pattern
|
||||
|
||||
# SYNOPSIS
|
||||
|
||||
rg [*options*] <*pattern*> [*<*path*> ...*]
|
||||
|
||||
rg [*options*] (-e PATTERN | -f FILE) ... [*<*path*> ...*]
|
||||
|
||||
rg [*options*] --files [*<*path*> ...*]
|
||||
|
||||
rg [*options*] --type-list
|
||||
|
||||
rg [*options*] --help
|
||||
|
||||
rg [*options*] --version
|
||||
|
||||
# DESCRIPTION
|
||||
|
||||
ripgrep (rg) combines the usability of The Silver Searcher (an ack clone) with
|
||||
the raw speed of grep.
|
||||
|
||||
ripgrep's regex engine uses finite automata and guarantees linear time
|
||||
searching. Because of this, features like backreferences and arbitrary
|
||||
lookaround are not supported.
|
||||
|
||||
Project home page: https://github.com/BurntSushi/ripgrep
|
||||
|
||||
# COMMON OPTIONS
|
||||
|
||||
-a, --text
|
||||
: Search binary files as if they were text.
|
||||
|
||||
-c, --count
|
||||
: Only show count of line matches for each file.
|
||||
|
||||
--color *WHEN*
|
||||
: Whether to use coloring in match. Valid values are never, always or auto.
|
||||
[default: auto]
|
||||
|
||||
-e, --regexp *PATTERN* ...
|
||||
: Use PATTERN to search. This option can be provided multiple times, where all
|
||||
patterns given are searched. This is also useful when searching for patterns
|
||||
that start with a dash.
|
||||
|
||||
-F, --fixed-strings
|
||||
: Treat the pattern as a literal string instead of a regular expression.
|
||||
|
||||
-g, --glob *GLOB* ...
|
||||
: Include or exclude files for searching that match the given glob. This always
|
||||
overrides any other ignore logic if there is a conflict, but is otherwise
|
||||
applied in addition to ignore files (e.g., .gitignore or .ignore). Multiple
|
||||
glob flags may be used. Globbing rules match .gitignore globs. Precede a
|
||||
glob with a '!' to exclude it.
|
||||
|
||||
The --glob flag subsumes the functionality of both the --include and
|
||||
--exclude flags commonly found in other tools.
|
||||
|
||||
Values given to -g must be quoted or your shell will expand them and result
|
||||
in unexpected behavior.
|
||||
|
||||
Combine with the --files flag to return matched filenames
|
||||
(i.e., to replicate ack/ag's -g flag).
|
||||
|
||||
For example: rg -g '\<glob\>' --files
|
||||
|
||||
-h, --help
|
||||
: Show this usage message.
|
||||
|
||||
-i, --ignore-case
|
||||
: Case insensitive search. Overridden by --case-sensitive.
|
||||
|
||||
-n, --line-number
|
||||
: Show line numbers (1-based). This is enabled by default at a tty.
|
||||
|
||||
-N, --no-line-number
|
||||
: Suppress line numbers.
|
||||
|
||||
-q, --quiet
|
||||
: Do not print anything to stdout. If a match is found in a file, stop
|
||||
searching that file.
|
||||
|
||||
-t, --type *TYPE* ...
|
||||
: Only search files matching TYPE. Multiple type flags may be provided. Use the
|
||||
--type-list flag to list all available types.
|
||||
|
||||
-T, --type-not *TYPE* ...
|
||||
: Do not search files matching TYPE. Multiple not-type flags may be provided.
|
||||
|
||||
-u, --unrestricted ...
|
||||
: Reduce the level of 'smart' searching. A single -u doesn't respect .gitignore
|
||||
(etc.) files. Two -u flags will search hidden files and directories. Three
|
||||
-u flags will search binary files. -uu is equivalent to grep -r, and -uuu is
|
||||
equivalent to grep -a -r.
|
||||
|
||||
Note that the -u flags are convenient aliases for other combinations of
|
||||
flags. -u aliases '--no-ignore'. -uu aliases '--no-ignore --hidden'.
|
||||
-uuu aliases '--no-ignore --hidden --text'.
|
||||
|
||||
-v, --invert-match
|
||||
: Invert matching.
|
||||
|
||||
-w, --word-regexp
|
||||
: Only show matches surrounded by word boundaries. This is equivalent to
|
||||
putting \\b before and after the search pattern.
|
||||
|
||||
# LESS COMMON OPTIONS
|
||||
|
||||
-A, --after-context *NUM*
|
||||
: Show NUM lines after each match.
|
||||
|
||||
-B, --before-context *NUM*
|
||||
: Show NUM lines before each match.
|
||||
|
||||
-C, --context *NUM*
|
||||
: Show NUM lines before and after each match.
|
||||
|
||||
--colors *SPEC* ...
|
||||
: This flag specifies color settings for use in the output. This flag may be
|
||||
provided multiple times. Settings are applied iteratively. Colors are limited
|
||||
to one of eight choices: red, blue, green, cyan, magenta, yellow, white and
|
||||
black. Styles are limited to nobold, bold, nointense or intense.
|
||||
|
||||
The format of the flag is {type}:{attribute}:{value}. {type} should be one
|
||||
of path, line or match. {attribute} can be fg, bg or style. Value is either
|
||||
a color (for fg and bg) or a text style. A special format, {type}:none,
|
||||
will clear all color settings for {type}.
|
||||
|
||||
For example, the following command will change the match color to magenta
|
||||
and the background color for line numbers to yellow:
|
||||
|
||||
rg --colors 'match:fg:magenta' --colors 'line:bg:yellow' foo.
|
||||
|
||||
--column
|
||||
: Show column numbers (1 based) in output. This only shows the column
|
||||
numbers for the first match on each line. Note that this doesn't try
|
||||
to account for Unicode. One byte is equal to one column. This implies
|
||||
--line-number.
|
||||
|
||||
--context-separator *SEPARATOR*
|
||||
: The string to use when separating non-continuous context lines. Escape
|
||||
sequences may be used. [default: --]
|
||||
|
||||
--debug
|
||||
: Show debug messages.
|
||||
|
||||
-E, --encoding *ENCODING*
|
||||
: Specify the text encoding that ripgrep will use on all files
|
||||
searched. The default value is 'auto', which will cause ripgrep to do
|
||||
a best effort automatic detection of encoding on a per-file basis.
|
||||
Other supported values can be found in the list of labels here:
|
||||
https://encoding.spec.whatwg.org/#concept-encoding-get
|
||||
|
||||
-f, --file FILE ...
|
||||
: Search for patterns from the given file, with one pattern per line. When this
|
||||
flag is used multiple times or in combination with the -e/--regexp flag,
|
||||
then all patterns provided are searched. Empty pattern lines will match all
|
||||
input lines, and the newline is not counted as part of the pattern.
|
||||
|
||||
--files
|
||||
: Print each file that would be searched (but don't search).
|
||||
|
||||
Combine with the -g flag to return matched paths, for example:
|
||||
|
||||
rg -g '\<glob\>' --files
|
||||
|
||||
-l, --files-with-matches
|
||||
: Only show path of each file with matches.
|
||||
|
||||
--files-without-match
|
||||
: Only show path of each file with no matches.
|
||||
|
||||
-H, --with-filename
|
||||
: Prefix each match with the file name that contains it. This is the
|
||||
default when more than one file is searched.
|
||||
|
||||
--no-filename
|
||||
: Never show the filename for a match. This is the default when
|
||||
one file is searched.
|
||||
|
||||
--heading
|
||||
: Show the file name above clusters of matches from each file instead of
|
||||
showing the file name for every match. This is the default mode at a tty.
|
||||
|
||||
--no-heading
|
||||
: Don't group matches by each file. If -H/--with-filename is enabled, then
|
||||
file names will be shown for every line matched. This is the default mode
|
||||
when not at a tty.
|
||||
|
||||
--hidden
|
||||
: Search hidden directories and files. (Hidden directories and files are
|
||||
skipped by default.)
|
||||
|
||||
--ignore-file FILE ...
|
||||
: Specify additional ignore files for filtering file paths.
|
||||
Ignore files should be in the gitignore format and are matched
|
||||
relative to the current working directory. These ignore files
|
||||
have lower precedence than all other ignore files. When
|
||||
specifying multiple ignore files, earlier files have lower
|
||||
precedence than later files.
|
||||
|
||||
-L, --follow
|
||||
: Follow symlinks.
|
||||
|
||||
-M, --max-columns *NUM*
|
||||
: Don't print lines longer than this limit in bytes. Longer lines are omitted,
|
||||
and only the number of matches in that line is printed.
|
||||
|
||||
-m, --max-count *NUM*
|
||||
: Limit the number of matching lines per file searched to NUM.
|
||||
|
||||
--max-filesize *NUM*+*SUFFIX*?
|
||||
: Ignore files larger than *NUM* in size. Directories will never be ignored.
|
||||
|
||||
*SUFFIX* is optional and may be one of K, M or G. These correspond to
|
||||
kilobytes, megabytes and gigabytes respectively. If omitted the input is
|
||||
treated as bytes.
|
||||
|
||||
--maxdepth *NUM*
|
||||
: Descend at most NUM directories below the command line arguments.
|
||||
A value of zero searches only the starting-points themselves.
|
||||
|
||||
--mmap
|
||||
: Search using memory maps when possible. This is enabled by default
|
||||
when ripgrep thinks it will be faster. (Note that mmap searching
|
||||
doesn't currently support the various context related options.)
|
||||
|
||||
--no-messages
|
||||
: Suppress all error messages.
|
||||
|
||||
--no-mmap
|
||||
: Never use memory maps, even when they might be faster.
|
||||
|
||||
--no-ignore
|
||||
: Don't respect ignore files (.gitignore, .ignore, etc.)
|
||||
This implies --no-ignore-parent.
|
||||
|
||||
--no-ignore-parent
|
||||
: Don't respect ignore files in parent directories.
|
||||
|
||||
--no-ignore-vcs
|
||||
: Don't respect version control ignore files (e.g., .gitignore).
|
||||
Note that .ignore files will continue to be respected.
|
||||
|
||||
--null
|
||||
: Whenever a file name is printed, follow it with a NUL byte.
|
||||
This includes printing filenames before matches, and when printing
|
||||
a list of matching files such as with --count, --files-with-matches
|
||||
and --files.
|
||||
|
||||
--path-separator *SEPARATOR*
|
||||
: The path separator to use when printing file paths. This defaults to your
|
||||
platform's path separator, which is / on Unix and \\ on Windows. This flag is
|
||||
intended for overriding the default when the environment demands it (e.g.,
|
||||
cygwin). A path separator is limited to a single byte.
|
||||
|
||||
-p, --pretty
|
||||
: Alias for --color=always --heading -n.
|
||||
|
||||
-r, --replace *ARG*
|
||||
: Replace every match with the string given when printing search results.
|
||||
Neither this flag nor any other flag will modify your files.
|
||||
|
||||
Capture group indices (e.g., $5) and names (e.g., $foo) are supported
|
||||
in the replacement string.
|
||||
|
||||
Note that the replacement by default replaces each match, and NOT the
|
||||
entire line. To replace the entire line, you should match the entire line.
|
||||
For example, to emit only the first phone numbers in each line:
|
||||
|
||||
rg '^.*([0-9]{3}-[0-9]{3}-[0-9]{4}).*$' --replace '$1'
|
||||
|
||||
-s, --case-sensitive
|
||||
: Search case sensitively. This overrides --ignore-case and --smart-case.
|
||||
|
||||
-S, --smart-case
|
||||
: Search case insensitively if the pattern is all lowercase.
|
||||
Search case sensitively otherwise. This is overridden by either
|
||||
--case-sensitive or --ignore-case.
|
||||
|
||||
--sort-files
|
||||
: Sort results by file path. Note that this currently
|
||||
disables all parallelism and runs search in a single thread.
|
||||
|
||||
-j, --threads *ARG*
|
||||
: The number of threads to use. 0 means use the number of logical CPUs
|
||||
(capped at 12). [default: 0]
|
||||
|
||||
--version
|
||||
: Show the version number of ripgrep and exit.
|
||||
|
||||
--vimgrep
|
||||
: Show results with every match on its own line, including line
|
||||
numbers and column numbers. (With this option, a line with more
|
||||
than one match of the regex will be printed more than once.)
|
||||
|
||||
# FILE TYPE MANAGEMENT OPTIONS
|
||||
|
||||
--type-list
|
||||
: Show all supported file types and their associated globs.
|
||||
|
||||
--type-add *ARG* ...
|
||||
: Add a new glob for a particular file type. Only one glob can be added
|
||||
at a time. Multiple --type-add flags can be provided. Unless --type-clear
|
||||
is used, globs are added to any existing globs inside of ripgrep. Note that
|
||||
this must be passed to every invocation of rg. Type settings are NOT
|
||||
persisted.
|
||||
|
||||
Example: `rg --type-add 'foo:*.foo' -tfoo PATTERN`
|
||||
|
||||
--type-add can also be used to include rules from other types
|
||||
with the special include directive. The include directive
|
||||
permits specifying one or more other type names (separated by a
|
||||
comma) that have been defined and its rules will automatically
|
||||
be imported into the type specified. For example, to create a
|
||||
type called src that matches C++, Python and Markdown files, one
|
||||
can use:
|
||||
|
||||
`--type-add 'src:include:cpp,py,md'`
|
||||
|
||||
Additional glob rules can still be added to the src type by
|
||||
using the --type-add flag again:
|
||||
|
||||
`--type-add 'src:include:cpp,py,md' --type-add 'src:*.foo'`
|
||||
|
||||
Note that type names must consist only of Unicode letters or
|
||||
numbers. Punctuation characters are not allowed.
|
||||
|
||||
--type-clear *TYPE* ...
|
||||
: Clear the file type globs previously defined for TYPE. This only clears
|
||||
the default type definitions that are found inside of ripgrep. Note
|
||||
that this must be passed to every invocation of rg.
|
||||
|
||||
# SHELL COMPLETION
|
||||
|
||||
Shell completion files are included in the release tarball for Bash, Fish, Zsh
|
||||
and PowerShell.
|
||||
|
||||
For **bash**, move `rg.bash-completion` to `$XDG_CONFIG_HOME/bash_completion`
|
||||
or `/etc/bash_completion.d/`.
|
||||
|
||||
For **fish**, move `rg.fish` to `$HOME/.config/fish/completions`.
|
||||
doc/rg.1.txt.tpl (new file)
@@ -0,0 +1,154 @@
|
||||
rg(1)
|
||||
=====
|
||||
|
||||
Name
|
||||
----
|
||||
rg - recursively search current directory for lines matching a pattern
|
||||
|
||||
|
||||
Synopsis
|
||||
--------
|
||||
*rg* [_OPTIONS_] _PATTERN_ [_PATH_...]
|
||||
|
||||
*rg* [_OPTIONS_] *-e* _PATTERN_... [_PATH_...]
|
||||
|
||||
*rg* [_OPTIONS_] *-f* _PATTERNFILE_... [_PATH_...]
|
||||
|
||||
*rg* [_OPTIONS_] *--files* [_PATH_...]
|
||||
|
||||
*rg* [_OPTIONS_] *--type-list*
|
||||
|
||||
*rg* [_OPTIONS_] *--help*
|
||||
|
||||
*rg* [_OPTIONS_] *--version*
|
||||
|
||||
|
||||
DESCRIPTION
|
||||
-----------
|
||||
ripgrep (rg) recursively searches your current directory for a regex pattern.
|
||||
By default, ripgrep will respect your `.gitignore` and automatically skip
|
||||
hidden files/directories and binary files.
|
||||
|
||||
ripgrep's regex engine uses finite automata and guarantees linear time
|
||||
searching. Because of this, features like backreferences and arbitrary
|
||||
lookaround are not supported.
|
||||
|
||||
|
||||
REGEX SYNTAX
|
||||
------------
|
||||
ripgrep uses Rust's regex engine, which documents its syntax:
|
||||
https://docs.rs/regex/0.2.5/regex/#syntax
|
||||
|
||||
ripgrep uses byte-oriented regexes, which has some additional documentation:
|
||||
https://docs.rs/regex/0.2.5/regex/bytes/index.html#syntax
|
||||
|
||||
To a first approximation, ripgrep uses Perl-like regexes without look-around or
|
||||
backreferences. This makes them very similar to the "extended" (ERE) regular
|
||||
expressions supported by `egrep`, but with a few additional features like
|
||||
Unicode character classes.
|
||||
|
||||
|
||||
POSITIONAL ARGUMENTS
|
||||
--------------------
|
||||
_PATTERN_::
|
||||
A regular expression used for searching. To match a pattern beginning with a
|
||||
dash, use the -e/--regexp option.
|
||||
|
||||
_PATH_::
|
||||
A file or directory to search. Directories are searched recursively. Paths
|
||||
specified explicitly on the command line override glob and ignore rules.
|
||||
|
||||
|
||||
OPTIONS
|
||||
-------
|
||||
{OPTIONS}
|
||||
|
||||
|
||||
EXIT STATUS
|
||||
-----------
|
||||
If ripgrep finds a match, then the exit status of the program is 0. If no match
|
||||
could be found, then the exit status is non-zero.
|
||||
|
||||
|
||||
CONFIGURATION FILES
|
||||
-------------------
|
||||
ripgrep supports reading configuration files that change ripgrep's default
|
||||
behavior. The format of the configuration file is an "rc" style and is very
|
||||
simple. It is defined by two rules:
|
||||
|
||||
1. Every line is a shell argument, after trimming ASCII whitespace.
|
||||
2. Lines starting with _#_ (optionally preceded by any amount of
|
||||
ASCII whitespace) are ignored.
|
||||
|
||||
ripgrep will look for a single configuration file if and only if the
|
||||
_RIPGREP_CONFIG_PATH_ environment variable is set and is non-empty.
|
||||
ripgrep will parse shell arguments from this file on startup and will
|
||||
behave as if the arguments in this file were prepended to any explicit
|
||||
arguments given to ripgrep on the command line.
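The two rules above are simple enough to sketch in a few lines of Rust. This is a hypothetical illustration of the format only, not ripgrep's actual parser:

```rust
// Hypothetical sketch of the two config-file rules above; not ripgrep's
// real implementation, just an illustration of the format.
fn config_args(contents: &str) -> Vec<String> {
    contents
        .lines()
        // Rule 1: every line is one shell argument, after trimming ASCII whitespace.
        .map(|line| line.trim_matches(|c: char| c.is_ascii_whitespace()))
        // Rule 2: lines starting with '#' are ignored. Skipping blank lines
        // as well is an assumption of this sketch.
        .filter(|line| !line.is_empty() && !line.starts_with('#'))
        .map(|line| line.to_string())
        .collect()
}

fn main() {
    let rc = "  --smart-case\n# prefer shorter lines\n--max-columns=150\n";
    // These arguments behave as if they were prepended to the command line.
    assert_eq!(config_args(rc), vec!["--smart-case", "--max-columns=150"]);
}
```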
|
||||
|
||||
For example, if your ripgreprc file contained a single line:
|
||||
|
||||
--smart-case
|
||||
|
||||
then the following command
|
||||
|
||||
RIPGREP_CONFIG_PATH=wherever/.ripgreprc rg foo
|
||||
|
||||
would behave identically to the following command
|
||||
|
||||
rg --smart-case foo
|
||||
|
||||
ripgrep also provides a flag, *--no-config*, that when present will suppress
|
||||
any and all support for configuration. This includes any future support
|
||||
for auto-loading configuration files from pre-determined paths.
|
||||
|
||||
Conflicts between configuration files and explicit arguments are handled
|
||||
exactly like conflicts in the same command line invocation. That is,
|
||||
this command:
|
||||
|
||||
RIPGREP_CONFIG_PATH=wherever/.ripgreprc rg foo --case-sensitive
|
||||
|
||||
is exactly equivalent to
|
||||
|
||||
rg --smart-case foo --case-sensitive
|
||||
|
||||
in which case, the *--case-sensitive* flag would override the *--smart-case*
|
||||
flag.
|
||||
|
||||
|
||||
SHELL COMPLETION
|
||||
----------------
|
||||
Shell completion files are included in the release tarball for Bash, Fish, Zsh
|
||||
and PowerShell.
|
||||
|
||||
For *bash*, move `rg.bash` to `$XDG_CONFIG_HOME/bash_completion`
|
||||
or `/etc/bash_completion.d/`.
|
||||
|
||||
For *fish*, move `rg.fish` to `$HOME/.config/fish/completions`.
|
||||
|
||||
For *zsh*, move `_rg` to one of your `$fpath` directories.
|
||||
|
||||
|
||||
CAVEATS
|
||||
-------
|
||||
ripgrep may abort unexpectedly when using default settings if it searches a
|
||||
file that is simultaneously truncated. This behavior can be avoided by passing
|
||||
the --no-mmap flag which will forcefully disable the use of memory maps in all
|
||||
cases.
|
||||
|
||||
|
||||
VERSION
|
||||
-------
|
||||
{VERSION}
|
||||
|
||||
|
||||
HOMEPAGE
|
||||
--------
|
||||
https://github.com/BurntSushi/ripgrep
|
||||
|
||||
Please report bugs and feature requests in the issue tracker.
|
||||
|
||||
|
||||
AUTHORS
|
||||
-------
|
||||
Andrew Gallant <jamslam@gmail.com>
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "globset"
|
||||
version = "0.1.4" #:version
|
||||
version = "0.4.0" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
Cross platform single glob and glob set matching. Glob set matching is the
|
||||
@@ -21,9 +21,9 @@ bench = false
|
||||
[dependencies]
|
||||
aho-corasick = "0.6.0"
|
||||
fnv = "1.0"
|
||||
log = "0.3"
|
||||
memchr = "1"
|
||||
regex = "0.2.1"
|
||||
log = "0.4"
|
||||
memchr = "2"
|
||||
regex = "0.2.9"
|
||||
|
||||
[dev-dependencies]
|
||||
glob = "0.2"
|
||||
|
||||
@@ -20,7 +20,7 @@ Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
globset = "0.1"
|
||||
globset = "0.3"
|
||||
```
|
||||
|
||||
and this to your crate root:
|
||||
@@ -36,7 +36,7 @@ This example shows how to match a single glob against a single file path.
|
||||
```rust
|
||||
use globset::Glob;
|
||||
|
||||
let glob = try!(Glob::new("*.rs")).compile_matcher();
|
||||
let glob = Glob::new("*.rs")?.compile_matcher();
|
||||
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(glob.is_match("foo/bar.rs"));
|
||||
@@ -51,8 +51,8 @@ semantics. In this example, we prevent wildcards from matching path separators.
|
||||
```rust
|
||||
use globset::GlobBuilder;
|
||||
|
||||
let glob = try!(GlobBuilder::new("*.rs")
|
||||
.literal_separator(true).build()).compile_matcher();
|
||||
let glob = GlobBuilder::new("*.rs")
|
||||
.literal_separator(true).build()?.compile_matcher();
|
||||
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(!glob.is_match("foo/bar.rs")); // no longer matches
|
||||
@@ -69,10 +69,10 @@ use globset::{Glob, GlobSetBuilder};
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
// A GlobBuilder can be used to configure each glob's match semantics
|
||||
// independently.
|
||||
builder.add(try!(Glob::new("*.rs")));
|
||||
builder.add(try!(Glob::new("src/lib.rs")));
|
||||
builder.add(try!(Glob::new("src/**/foo.rs")));
|
||||
let set = try!(builder.build());
|
||||
builder.add(Glob::new("*.rs")?);
|
||||
builder.add(Glob::new("src/lib.rs")?);
|
||||
builder.add(Glob::new("src/**/foo.rs")?);
|
||||
let set = builder.build()?;
|
||||
|
||||
assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]);
|
||||
```
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::fmt;
|
||||
use std::hash;
|
||||
use std::iter;
|
||||
@@ -9,7 +8,7 @@ use std::str;
|
||||
use regex;
|
||||
use regex::bytes::Regex;
|
||||
|
||||
use {Candidate, Error, new_regex};
|
||||
use {Candidate, Error, ErrorKind, new_regex};
|
||||
|
||||
/// Describes a matching strategy for a particular pattern.
|
||||
///
|
||||
@@ -28,7 +27,7 @@ pub enum MatchStrategy {
|
||||
BasenameLiteral(String),
|
||||
/// A pattern matches if and only if the file path's extension matches this
|
||||
/// literal string.
|
||||
Extension(OsString),
|
||||
Extension(String),
|
||||
/// A pattern matches if and only if this prefix literal is a prefix of the
|
||||
/// candidate file path.
|
||||
Prefix(String),
|
||||
@@ -47,7 +46,7 @@ pub enum MatchStrategy {
|
||||
/// extension. Note that this is a necessary but NOT sufficient criterion.
|
||||
/// Namely, if the extension matches, then a full regex search is still
|
||||
/// required.
|
||||
RequiredExtension(OsString),
|
||||
RequiredExtension(String),
|
||||
/// A regex needs to be used for matching.
|
||||
Regex,
|
||||
}
|
||||
@@ -154,7 +153,7 @@ impl GlobStrategic {
|
||||
lit.as_bytes() == &*candidate.basename
|
||||
}
|
||||
MatchStrategy::Extension(ref ext) => {
|
||||
candidate.ext == ext
|
||||
ext.as_bytes() == &*candidate.ext
|
||||
}
|
||||
MatchStrategy::Prefix(ref pre) => {
|
||||
starts_with(pre.as_bytes(), byte_path)
|
||||
@@ -166,7 +165,8 @@ impl GlobStrategic {
|
||||
ends_with(suffix.as_bytes(), byte_path)
|
||||
}
|
||||
MatchStrategy::RequiredExtension(ref ext) => {
|
||||
candidate.ext == ext && self.re.is_match(byte_path)
|
||||
let ext = ext.as_bytes();
|
||||
&*candidate.ext == ext && self.re.is_match(byte_path)
|
||||
}
|
||||
MatchStrategy::Regex => self.re.is_match(byte_path),
|
||||
}
|
||||
@@ -187,13 +187,26 @@ pub struct GlobBuilder<'a> {
|
||||
opts: GlobOptions,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
|
||||
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
|
||||
struct GlobOptions {
|
||||
/// Whether to match case insensitively.
|
||||
case_insensitive: bool,
|
||||
/// Whether to require a literal separator to match a separator in a file
|
||||
/// path. e.g., when enabled, `*` won't match `/`.
|
||||
literal_separator: bool,
|
||||
/// Whether or not to use `\` to escape special characters.
|
||||
/// e.g., when enabled, `\*` will match a literal `*`.
|
||||
backslash_escape: bool,
|
||||
}
|
||||
|
||||
impl GlobOptions {
|
||||
fn default() -> GlobOptions {
|
||||
GlobOptions {
|
||||
case_insensitive: false,
|
||||
literal_separator: false,
|
||||
backslash_escape: !is_separator('\\'),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Eq, PartialEq)]
|
||||
@@ -295,7 +308,7 @@ impl Glob {
|
||||
/// std::path::Path::extension returns. Namely, this extension includes
|
||||
/// the '.'. Also, paths like `.rs` are considered to have an extension
|
||||
/// of `.rs`.
|
||||
fn ext(&self) -> Option<OsString> {
|
||||
fn ext(&self) -> Option<String> {
|
||||
if self.opts.case_insensitive {
|
||||
return None;
|
||||
}
|
||||
@@ -319,11 +332,11 @@ impl Glob {
|
||||
Some(&Token::Literal('.')) => {}
|
||||
_ => return None,
|
||||
}
|
||||
let mut lit = OsStr::new(".").to_os_string();
|
||||
let mut lit = ".".to_string();
|
||||
for t in self.tokens[start + 2..].iter() {
|
||||
match *t {
|
||||
Token::Literal('.') | Token::Literal('/') => return None,
|
||||
Token::Literal(c) => lit.push(c.to_string()),
|
||||
Token::Literal(c) => lit.push(c),
|
||||
_ => return None,
|
||||
}
|
||||
}
|
||||
@@ -337,7 +350,7 @@ impl Glob {
|
||||
/// This is like `ext`, but returns an extension even if it isn't sufficient
|
||||
/// to imply a match. Namely, if an extension is returned, then it is
|
||||
/// necessary but not sufficient for a match.
|
||||
fn required_ext(&self) -> Option<OsString> {
|
||||
fn required_ext(&self) -> Option<String> {
|
||||
if self.opts.case_insensitive {
|
||||
return None;
|
||||
}
|
||||
@@ -360,7 +373,7 @@ impl Glob {
|
||||
None
|
||||
} else {
|
||||
ext.reverse();
|
||||
Some(OsString::from(ext.into_iter().collect::<String>()))
|
||||
Some(ext.into_iter().collect())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -509,7 +522,7 @@ impl Glob {
|
||||
Some(&self.tokens[start..])
|
||||
}
|
||||
|
||||
/// Returns the pattern as a literal if and only if the pattern exclusiely
|
||||
/// Returns the pattern as a literal if and only if the pattern exclusively
|
||||
/// matches the basename of a file path *and* is a literal.
|
||||
///
|
||||
/// The basic format of these patterns is `**/{literal}`, where `{literal}`
|
||||
@@ -544,16 +557,24 @@ impl<'a> GlobBuilder<'a> {
|
||||
/// Parses and builds the pattern.
|
||||
pub fn build(&self) -> Result<Glob, Error> {
|
||||
let mut p = Parser {
|
||||
glob: &self.glob,
|
||||
stack: vec![Tokens::default()],
|
||||
chars: self.glob.chars().peekable(),
|
||||
prev: None,
|
||||
cur: None,
|
||||
opts: &self.opts,
|
||||
};
|
||||
try!(p.parse());
|
||||
p.parse()?;
|
||||
if p.stack.is_empty() {
|
||||
Err(Error::UnopenedAlternates)
|
||||
Err(Error {
|
||||
glob: Some(self.glob.to_string()),
|
||||
kind: ErrorKind::UnopenedAlternates,
|
||||
})
|
||||
} else if p.stack.len() > 1 {
|
||||
Err(Error::UnclosedAlternates)
|
||||
Err(Error {
|
||||
glob: Some(self.glob.to_string()),
|
||||
kind: ErrorKind::UnclosedAlternates,
|
||||
})
|
||||
} else {
|
||||
let tokens = p.stack.pop().unwrap();
|
||||
Ok(Glob {
|
||||
@@ -578,6 +599,19 @@ impl<'a> GlobBuilder<'a> {
|
||||
self.opts.literal_separator = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// When enabled, a back slash (`\`) may be used to escape
|
||||
/// special characters in a glob pattern. Additionally, this will
|
||||
/// prevent `\` from being interpreted as a path separator on all
|
||||
/// platforms.
|
||||
///
|
||||
/// This is enabled by default on platforms where `\` is not a
|
||||
/// path separator and disabled by default on platforms where `\`
|
||||
/// is a path separator.
|
||||
pub fn backslash_escape(&mut self, yes: bool) -> &mut GlobBuilder<'a> {
|
||||
self.opts.backslash_escape = yes;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl Tokens {
|
||||
@@ -698,30 +732,30 @@ fn bytes_to_escaped_literal(bs: &[u8]) -> String {
|
||||
}
|
||||
|
||||
struct Parser<'a> {
|
||||
glob: &'a str,
|
||||
stack: Vec<Tokens>,
|
||||
chars: iter::Peekable<str::Chars<'a>>,
|
||||
prev: Option<char>,
|
||||
cur: Option<char>,
|
||||
opts: &'a GlobOptions,
|
||||
}
|
||||
|
||||
impl<'a> Parser<'a> {
|
||||
fn error(&self, kind: ErrorKind) -> Error {
|
||||
Error { glob: Some(self.glob.to_string()), kind: kind }
|
||||
}
|
||||
|
||||
fn parse(&mut self) -> Result<(), Error> {
|
||||
while let Some(c) = self.bump() {
|
||||
match c {
|
||||
'?' => try!(self.push_token(Token::Any)),
|
||||
'*' => try!(self.parse_star()),
|
||||
'[' => try!(self.parse_class()),
|
||||
'{' => try!(self.push_alternate()),
|
||||
'}' => try!(self.pop_alternate()),
|
||||
',' => try!(self.parse_comma()),
|
||||
c => {
|
||||
if is_separator(c) {
|
||||
// Normalize all patterns to use / as a separator.
|
||||
try!(self.push_token(Token::Literal('/')))
|
||||
} else {
|
||||
try!(self.push_token(Token::Literal(c)))
|
||||
}
|
||||
}
|
||||
'?' => self.push_token(Token::Any)?,
|
||||
'*' => self.parse_star()?,
|
||||
'[' => self.parse_class()?,
|
||||
'{' => self.push_alternate()?,
|
||||
'}' => self.pop_alternate()?,
|
||||
',' => self.parse_comma()?,
|
||||
'\\' => self.parse_backslash()?,
|
||||
c => self.push_token(Token::Literal(c))?,
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
@@ -729,7 +763,7 @@ impl<'a> Parser<'a> {
|
||||
|
||||
fn push_alternate(&mut self) -> Result<(), Error> {
|
||||
if self.stack.len() > 1 {
|
||||
return Err(Error::NestedAlternates);
|
||||
return Err(self.error(ErrorKind::NestedAlternates));
|
||||
}
|
||||
Ok(self.stack.push(Tokens::default()))
|
||||
}
|
||||
@@ -743,22 +777,22 @@ impl<'a> Parser<'a> {
|
||||
}
|
||||
|
||||
fn push_token(&mut self, tok: Token) -> Result<(), Error> {
|
||||
match self.stack.last_mut() {
|
||||
None => Err(Error::UnopenedAlternates),
|
||||
Some(ref mut pat) => Ok(pat.push(tok)),
|
||||
if let Some(ref mut pat) = self.stack.last_mut() {
|
||||
return Ok(pat.push(tok));
|
||||
}
|
||||
Err(self.error(ErrorKind::UnopenedAlternates))
|
||||
}
|
||||
|
||||
fn pop_token(&mut self) -> Result<Token, Error> {
|
||||
match self.stack.last_mut() {
|
||||
None => Err(Error::UnopenedAlternates),
|
||||
Some(ref mut pat) => Ok(pat.pop().unwrap()),
|
||||
if let Some(ref mut pat) = self.stack.last_mut() {
|
||||
return Ok(pat.pop().unwrap());
|
||||
}
|
||||
Err(self.error(ErrorKind::UnopenedAlternates))
|
||||
}
|
||||
|
||||
fn have_tokens(&self) -> Result<bool, Error> {
|
||||
match self.stack.last() {
|
||||
None => Err(Error::UnopenedAlternates),
|
||||
None => Err(self.error(ErrorKind::UnopenedAlternates)),
|
||||
Some(ref pat) => Ok(!pat.is_empty()),
|
||||
}
|
||||
}
|
||||
@@ -774,26 +808,40 @@ impl<'a> Parser<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_backslash(&mut self) -> Result<(), Error> {
|
||||
if self.opts.backslash_escape {
|
||||
match self.bump() {
|
||||
None => Err(self.error(ErrorKind::DanglingEscape)),
|
||||
Some(c) => self.push_token(Token::Literal(c)),
|
||||
}
|
||||
} else if is_separator('\\') {
|
||||
// Normalize all patterns to use / as a separator.
|
||||
self.push_token(Token::Literal('/'))
|
||||
} else {
|
||||
self.push_token(Token::Literal('\\'))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_star(&mut self) -> Result<(), Error> {
|
||||
let prev = self.prev;
|
||||
if self.chars.peek() != Some(&'*') {
|
||||
try!(self.push_token(Token::ZeroOrMore));
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
return Ok(());
|
||||
}
|
||||
assert!(self.bump() == Some('*'));
|
||||
if !try!(self.have_tokens()) {
|
||||
try!(self.push_token(Token::RecursivePrefix));
|
||||
if !self.have_tokens()? {
|
||||
self.push_token(Token::RecursivePrefix)?;
|
||||
let next = self.bump();
|
||||
if !next.map(is_separator).unwrap_or(true) {
|
||||
return Err(Error::InvalidRecursive);
|
||||
return Err(self.error(ErrorKind::InvalidRecursive));
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
try!(self.pop_token());
|
||||
self.pop_token()?;
|
||||
if !prev.map(is_separator).unwrap_or(false) {
|
||||
if self.stack.len() <= 1
|
||||
|| (prev != Some(',') && prev != Some('{')) {
|
||||
return Err(Error::InvalidRecursive);
|
||||
return Err(self.error(ErrorKind::InvalidRecursive));
|
||||
}
|
||||
}
|
||||
match self.chars.peek() {
|
||||
@@ -808,28 +856,35 @@ impl<'a> Parser<'a> {
|
||||
assert!(self.bump().map(is_separator).unwrap_or(false));
|
||||
self.push_token(Token::RecursiveZeroOrMore)
|
||||
}
|
||||
_ => Err(Error::InvalidRecursive),
|
||||
_ => Err(self.error(ErrorKind::InvalidRecursive)),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_class(&mut self) -> Result<(), Error> {
|
||||
fn add_to_last_range(
|
||||
glob: &str,
|
||||
r: &mut (char, char),
|
||||
add: char,
|
||||
) -> Result<(), Error> {
|
||||
r.1 = add;
|
||||
if r.1 < r.0 {
|
||||
Err(Error::InvalidRange(r.0, r.1))
|
||||
Err(Error {
|
||||
glob: Some(glob.to_string()),
|
||||
kind: ErrorKind::InvalidRange(r.0, r.1),
|
||||
})
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
let mut negated = false;
|
||||
let mut ranges = vec![];
|
||||
if self.chars.peek() == Some(&'!') {
|
||||
assert!(self.bump() == Some('!'));
|
||||
negated = true;
|
||||
let negated = match self.chars.peek() {
|
||||
Some(&'!') | Some(&'^') => {
|
||||
let bump = self.bump();
|
||||
assert!(bump == Some('!') || bump == Some('^'));
|
||||
true
|
||||
}
|
||||
_ => false,
|
||||
};
|
||||
let mut first = true;
|
||||
let mut in_range = false;
|
||||
loop {
|
||||
@@ -837,7 +892,7 @@ impl<'a> Parser<'a> {
|
||||
Some(c) => c,
|
||||
// The only way to successfully break this loop is to observe
|
||||
// a ']'.
|
||||
None => return Err(Error::UnclosedClass),
|
||||
None => return Err(self.error(ErrorKind::UnclosedClass)),
|
||||
};
|
||||
match c {
|
||||
']' => {
|
||||
@@ -854,7 +909,7 @@ impl<'a> Parser<'a> {
|
||||
// invariant: in_range is only set when there is
|
||||
// already at least one character seen.
|
||||
let r = ranges.last_mut().unwrap();
|
||||
try!(add_to_last_range(r, '-'));
|
||||
add_to_last_range(&self.glob, r, '-')?;
|
||||
in_range = false;
|
||||
} else {
|
||||
assert!(!ranges.is_empty());
|
||||
@@ -865,7 +920,8 @@ impl<'a> Parser<'a> {
|
||||
if in_range {
|
||||
// invariant: in_range is only set when there is
|
||||
// already at least one character seen.
|
||||
try!(add_to_last_range(ranges.last_mut().unwrap(), c));
|
||||
add_to_last_range(
|
||||
&self.glob, ranges.last_mut().unwrap(), c)?;
|
||||
} else {
|
||||
ranges.push((c, c));
|
||||
}
|
||||
@@ -907,16 +963,15 @@ fn ends_with(needle: &[u8], haystack: &[u8]) -> bool {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ffi::{OsStr, OsString};
|
||||
|
||||
use {GlobSetBuilder, Error};
|
||||
use {GlobSetBuilder, ErrorKind};
|
||||
use super::{Glob, GlobBuilder, Token};
|
||||
use super::Token::*;
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default)]
|
||||
struct Options {
|
||||
casei: bool,
|
||||
litsep: bool,
|
||||
casei: Option<bool>,
|
||||
litsep: Option<bool>,
|
||||
bsesc: Option<bool>,
|
||||
}
|
||||
|
||||
macro_rules! syntax {
|
||||
@@ -934,7 +989,7 @@ mod tests {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let err = Glob::new($pat).unwrap_err();
|
||||
assert_eq!($err, err);
|
||||
assert_eq!(&$err, err.kind());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -946,11 +1001,17 @@ mod tests {
|
||||
($name:ident, $pat:expr, $re:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($options.casei)
|
||||
.literal_separator($options.litsep)
|
||||
.build()
|
||||
.unwrap();
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
assert_eq!(format!("(?-u){}", $re), pat.regex());
|
||||
}
|
||||
};
|
||||
@@ -963,11 +1024,17 @@ mod tests {
|
||||
($name:ident, $pat:expr, $path:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($options.casei)
|
||||
.literal_separator($options.litsep)
|
||||
.build()
|
||||
.unwrap();
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
let matcher = pat.compile_matcher();
|
||||
let strategic = pat.compile_strategic_matcher();
|
||||
let set = GlobSetBuilder::new().add(pat).build().unwrap();
|
||||
@@ -985,11 +1052,17 @@ mod tests {
|
||||
($name:ident, $pat:expr, $path:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($options.casei)
|
||||
.literal_separator($options.litsep)
|
||||
.build()
|
||||
.unwrap();
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
let matcher = pat.compile_matcher();
|
||||
let strategic = pat.compile_strategic_matcher();
|
||||
let set = GlobSetBuilder::new().add(pat).build().unwrap();
|
||||
@@ -1001,7 +1074,6 @@ mod tests {
|
||||
}
|
||||
|
||||
fn s(string: &str) -> String { string.to_string() }
|
||||
fn os(string: &str) -> OsString { OsStr::new(string).to_os_string() }
|
||||
|
||||
fn class(s: char, e: char) -> Token {
|
||||
Class { negated: false, ranges: vec![(s, e)] }
|
||||
@@ -1056,28 +1128,42 @@ mod tests {
|
||||
syntax!(cls17, "[a-z0-9]", vec![rclass(&[('a', 'z'), ('0', '9')])]);
|
||||
syntax!(cls18, "[!0-9a-z]", vec![rclassn(&[('0', '9'), ('a', 'z')])]);
|
||||
syntax!(cls19, "[!a-z0-9]", vec![rclassn(&[('a', 'z'), ('0', '9')])]);
|
||||
syntax!(cls20, "[^a]", vec![classn('a', 'a')]);
|
||||
syntax!(cls21, "[^a-z]", vec![classn('a', 'z')]);
|
||||
|
||||
syntaxerr!(err_rseq1, "a**", Error::InvalidRecursive);
|
||||
syntaxerr!(err_rseq2, "**a", Error::InvalidRecursive);
|
||||
syntaxerr!(err_rseq3, "a**b", Error::InvalidRecursive);
|
||||
syntaxerr!(err_rseq4, "***", Error::InvalidRecursive);
|
||||
syntaxerr!(err_rseq5, "/a**", Error::InvalidRecursive);
|
||||
syntaxerr!(err_rseq6, "/**a", Error::InvalidRecursive);
|
||||
syntaxerr!(err_rseq7, "/a**b", Error::InvalidRecursive);
|
||||
syntaxerr!(err_unclosed1, "[", Error::UnclosedClass);
|
||||
syntaxerr!(err_unclosed2, "[]", Error::UnclosedClass);
|
||||
syntaxerr!(err_unclosed3, "[!", Error::UnclosedClass);
|
||||
syntaxerr!(err_unclosed4, "[!]", Error::UnclosedClass);
|
||||
syntaxerr!(err_range1, "[z-a]", Error::InvalidRange('z', 'a'));
|
||||
syntaxerr!(err_range2, "[z--]", Error::InvalidRange('z', '-'));
|
||||
syntaxerr!(err_rseq1, "a**", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq2, "**a", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq3, "a**b", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq4, "***", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq5, "/a**", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq6, "/**a", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq7, "/a**b", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_unclosed1, "[", ErrorKind::UnclosedClass);
|
||||
syntaxerr!(err_unclosed2, "[]", ErrorKind::UnclosedClass);
|
||||
syntaxerr!(err_unclosed3, "[!", ErrorKind::UnclosedClass);
|
||||
syntaxerr!(err_unclosed4, "[!]", ErrorKind::UnclosedClass);
|
||||
syntaxerr!(err_range1, "[z-a]", ErrorKind::InvalidRange('z', 'a'));
|
||||
syntaxerr!(err_range2, "[z--]", ErrorKind::InvalidRange('z', '-'));
|
||||
|
||||
const CASEI: Options = Options {
|
||||
casei: true,
|
||||
litsep: false,
|
||||
casei: Some(true),
|
||||
litsep: None,
|
||||
bsesc: None,
|
||||
};
|
||||
const SLASHLIT: Options = Options {
|
||||
casei: false,
|
||||
litsep: true,
|
||||
casei: None,
|
||||
litsep: Some(true),
|
||||
bsesc: None,
|
||||
};
|
||||
const NOBSESC: Options = Options {
|
||||
casei: None,
|
||||
litsep: None,
|
||||
bsesc: Some(false),
|
||||
};
|
||||
const BSESC: Options = Options {
|
||||
casei: None,
|
||||
litsep: None,
|
||||
bsesc: Some(true),
|
||||
};
|
||||
|
||||
toregex!(re_casei, "a", "(?i)^a$", &CASEI);
|
||||
@@ -1133,6 +1219,7 @@ mod tests {
|
||||
matches!(matchrec22, ".*/**", ".abc/abc");
|
||||
matches!(matchrec23, "foo/**", "foo");
|
||||
matches!(matchrec24, "**/foo/bar", "foo/bar");
|
||||
matches!(matchrec25, "some/*/needle.txt", "some/one/needle.txt");
|
||||
|
||||
matches!(matchrange1, "a[0-9]b", "a0b");
|
||||
matches!(matchrange2, "a[0-9]b", "a9b");
|
||||
@@ -1145,6 +1232,7 @@ mod tests {
|
||||
matches!(matchrange9, "[-a-c]", "b");
|
||||
matches!(matchrange10, "[a-c-]", "b");
|
||||
matches!(matchrange11, "[-]", "-");
|
||||
matches!(matchrange12, "a[^0-9]b", "a_b");
|
||||
|
||||
matches!(matchpat1, "*hello.txt", "hello.txt");
|
||||
matches!(matchpat2, "*hello.txt", "gareth_says_hello.txt");
|
||||
@@ -1188,6 +1276,17 @@ mod tests {
|
||||
#[cfg(not(unix))]
|
||||
matches!(matchslash5, "abc\\def", "abc/def", SLASHLIT);
|
||||
|
||||
matches!(matchbackslash1, "\\[", "[", BSESC);
|
||||
matches!(matchbackslash2, "\\?", "?", BSESC);
|
||||
matches!(matchbackslash3, "\\*", "*", BSESC);
|
||||
matches!(matchbackslash4, "\\[a-z]", "\\a", NOBSESC);
|
||||
matches!(matchbackslash5, "\\?", "\\a", NOBSESC);
|
||||
matches!(matchbackslash6, "\\*", "\\\\", NOBSESC);
|
||||
#[cfg(unix)]
|
||||
matches!(matchbackslash7, "\\a", "a");
|
||||
#[cfg(not(unix))]
|
||||
matches!(matchbackslash8, "\\a", "/a");
|
||||
|
||||
nmatches!(matchnot1, "a*b*c", "abcd");
|
||||
nmatches!(matchnot2, "abc*abc*abc", "abcabcabcabcabcabcabca");
|
||||
nmatches!(matchnot3, "some/**/needle.txt", "some/other/notthis.txt");
|
||||
@@ -1217,18 +1316,35 @@ mod tests {
|
||||
nmatches!(matchnot25, "*.c", "mozilla-sha1/sha1.c", SLASHLIT);
|
||||
nmatches!(matchnot26, "**/m4/ltoptions.m4",
|
||||
"csharp/src/packages/repositories.config", SLASHLIT);
|
||||
nmatches!(matchnot27, "a[^0-9]b", "a0b");
|
||||
nmatches!(matchnot28, "a[^0-9]b", "a9b");
|
||||
nmatches!(matchnot29, "[^-]", "-");
|
||||
nmatches!(matchnot30, "some/*/needle.txt", "some/needle.txt");
|
||||
nmatches!(
|
||||
matchrec31,
|
||||
"some/*/needle.txt", "some/one/two/needle.txt", SLASHLIT);
|
||||
nmatches!(
|
||||
matchrec32,
|
||||
"some/*/needle.txt", "some/one/two/three/needle.txt", SLASHLIT);
|
||||
|
||||
macro_rules! extract {
|
||||
($which:ident, $name:ident, $pat:expr, $expect:expr) => {
|
||||
extract!($which, $name, $pat, $expect, Options::default());
|
||||
};
|
||||
($which:ident, $name:ident, $pat:expr, $expect:expr, $opts:expr) => {
|
||||
($which:ident, $name:ident, $pat:expr, $expect:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($opts.casei)
|
||||
.literal_separator($opts.litsep)
|
||||
.build().unwrap();
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
assert_eq!($expect, pat.$which());
|
||||
}
|
||||
};
|
||||
@@ -1285,19 +1401,19 @@ mod tests {
|
||||
Literal('f'), Literal('o'), ZeroOrMore, Literal('o'),
|
||||
]), SLASHLIT);
|
||||
|
||||
ext!(extract_ext1, "**/*.rs", Some(os(".rs")));
|
||||
ext!(extract_ext1, "**/*.rs", Some(s(".rs")));
|
||||
ext!(extract_ext2, "**/*.rs.bak", None);
|
||||
ext!(extract_ext3, "*.rs", Some(os(".rs")));
|
||||
ext!(extract_ext3, "*.rs", Some(s(".rs")));
|
||||
ext!(extract_ext4, "a*.rs", None);
|
||||
ext!(extract_ext5, "/*.c", None);
|
||||
ext!(extract_ext6, "*.c", None, SLASHLIT);
|
||||
ext!(extract_ext7, "*.c", Some(os(".c")));
|
||||
ext!(extract_ext7, "*.c", Some(s(".c")));
|
||||
|
||||
required_ext!(extract_req_ext1, "*.rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext2, "/foo/bar/*.rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext3, "/foo/bar/*.rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext4, "/foo/bar/.rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext5, ".rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext1, "*.rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext2, "/foo/bar/*.rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext3, "/foo/bar/*.rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext4, "/foo/bar/.rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext5, ".rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext6, "./rs", None);
|
||||
required_ext!(extract_req_ext7, "foo", None);
|
||||
required_ext!(extract_req_ext8, ".foo/", None);
|
||||
|
||||
@@ -22,7 +22,7 @@ This example shows how to match a single glob against a single file path.
|
||||
# fn example() -> Result<(), globset::Error> {
|
||||
use globset::Glob;
|
||||
|
||||
let glob = try!(Glob::new("*.rs")).compile_matcher();
|
||||
let glob = Glob::new("*.rs")?.compile_matcher();
|
||||
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(glob.is_match("foo/bar.rs"));
|
||||
@@ -39,8 +39,8 @@ semantics. In this example, we prevent wildcards from matching path separators.
|
||||
# fn example() -> Result<(), globset::Error> {
|
||||
use globset::GlobBuilder;
|
||||
|
||||
let glob = try!(GlobBuilder::new("*.rs")
|
||||
.literal_separator(true).build()).compile_matcher();
|
||||
let glob = GlobBuilder::new("*.rs")
|
||||
.literal_separator(true).build()?.compile_matcher();
|
||||
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(!glob.is_match("foo/bar.rs")); // no longer matches
|
||||
@@ -59,10 +59,10 @@ use globset::{Glob, GlobSetBuilder};
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
// A GlobBuilder can be used to configure each glob's match semantics
|
||||
// independently.
|
||||
builder.add(try!(Glob::new("*.rs")));
|
||||
builder.add(try!(Glob::new("src/lib.rs")));
|
||||
builder.add(try!(Glob::new("src/**/foo.rs")));
|
||||
let set = try!(builder.build());
|
||||
builder.add(Glob::new("*.rs")?);
|
||||
builder.add(Glob::new("src/lib.rs")?);
|
||||
builder.add(Glob::new("src/**/foo.rs")?);
|
||||
let set = builder.build()?;
|
||||
|
||||
assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]);
|
||||
# Ok(()) } example().unwrap();
|
||||
@@ -91,6 +91,11 @@ Standard Unix-style glob syntax is supported:
|
||||
`[!ab]` to match any character except for `a` and `b`.
|
||||
* Metacharacters such as `*` and `?` can be escaped with character class
|
||||
notation. e.g., `[*]` matches `*`.
|
||||
* When backslash escapes are enabled, a backslash (`\`) will escape all meta
|
||||
characters in a glob. If it precedes a non-meta character, then the backslash is
|
||||
ignored. A `\\` will match a literal `\\`. Note that this mode is only
|
||||
enabled on Unix platforms by default, but can be enabled on any platform
|
||||
via the `backslash_escape` setting on `GlobBuilder`.
|
||||
|
||||
A `GlobBuilder` can be used to prevent wildcards from matching path separators,
|
||||
or to enable case insensitive matching.
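A short sketch of the escaping behavior described above, using the `backslash_escape` knob on `GlobBuilder` added in this change; it mirrors the new `matchbackslash` tests and is an illustration, not official documentation:

```rust
use globset::GlobBuilder;

fn main() -> Result<(), globset::Error> {
    // With backslash escapes enabled, `\*` matches a literal `*` instead of
    // acting as a wildcard.
    let matcher = GlobBuilder::new(r"\*")
        .backslash_escape(true)
        .build()?
        .compile_matcher();
    assert!(matcher.is_match("*"));
    assert!(!matcher.is_match("abc"));
    Ok(())
}
```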
|
||||
@@ -108,7 +113,7 @@ extern crate regex;
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::error::Error as StdError;
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::ffi::OsStr;
|
||||
use std::fmt;
|
||||
use std::hash;
|
||||
use std::path::Path;
|
||||
@@ -128,7 +133,16 @@ mod pathutil;
|
||||
|
||||
/// Represents an error that can occur when parsing a glob pattern.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum Error {
|
||||
pub struct Error {
|
||||
/// The original glob provided by the caller.
|
||||
glob: Option<String>,
|
||||
/// The kind of error.
|
||||
kind: ErrorKind,
|
||||
}
|
||||
|
||||
/// The kind of error that can occur when parsing a glob pattern.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum ErrorKind {
|
||||
/// Occurs when a use of `**` is invalid. Namely, `**` can only appear
|
||||
/// adjacent to a path separator, or the beginning/end of a glob.
|
||||
InvalidRecursive,
|
||||
@@ -145,52 +159,96 @@ pub enum Error {
|
||||
/// Occurs when an alternating group is nested inside another alternating
|
||||
/// group, e.g., `{{a,b},{c,d}}`.
|
||||
NestedAlternates,
|
||||
/// Occurs when an unescaped '\' is found at the end of a glob.
|
||||
DanglingEscape,
|
||||
/// An error associated with parsing or compiling a regex.
|
||||
Regex(String),
|
||||
/// Hints that destructuring should not be exhaustive.
|
||||
///
|
||||
/// This enum may grow additional variants, so this makes sure clients
|
||||
/// don't count on exhaustive matching. (Otherwise, adding a new variant
|
||||
/// could break existing code.)
|
||||
#[doc(hidden)]
|
||||
__Nonexhaustive,
|
||||
}
|
||||
|
||||
impl StdError for Error {
|
||||
fn description(&self) -> &str {
|
||||
self.kind.description()
|
||||
}
|
||||
}
|
||||
|
||||
impl Error {
|
||||
/// Return the glob that caused this error, if one exists.
|
||||
pub fn glob(&self) -> Option<&str> {
|
||||
self.glob.as_ref().map(|s| &**s)
|
||||
}
|
||||
|
||||
/// Return the kind of this error.
|
||||
pub fn kind(&self) -> &ErrorKind {
|
||||
&self.kind
|
||||
}
|
||||
}
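A hedged sketch of how a caller might consume the new error API; the usage is inferred from the definitions above and the `syntaxerr!` tests earlier in this change, not taken from ripgrep itself:

```rust
use globset::{ErrorKind, Glob};

fn main() {
    // `a**` is rejected because `**` must be its own path component.
    let err = Glob::new("a**").unwrap_err();
    assert_eq!(err.glob(), Some("a**"));
    match err.kind() {
        ErrorKind::InvalidRecursive => println!("bad use of `**` in {:?}", err.glob()),
        other => println!("other glob error: {}", other),
    }
}
```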
|
||||
|
||||
impl ErrorKind {
|
||||
fn description(&self) -> &str {
|
||||
match *self {
|
||||
Error::InvalidRecursive => {
|
||||
ErrorKind::InvalidRecursive => {
|
||||
"invalid use of **; must be one path component"
|
||||
}
|
||||
Error::UnclosedClass => {
|
||||
ErrorKind::UnclosedClass => {
|
||||
"unclosed character class; missing ']'"
|
||||
}
|
||||
Error::InvalidRange(_, _) => {
|
||||
ErrorKind::InvalidRange(_, _) => {
|
||||
"invalid character range"
|
||||
}
|
||||
Error::UnopenedAlternates => {
|
||||
ErrorKind::UnopenedAlternates => {
|
||||
"unopened alternate group; missing '{' \
|
||||
(maybe escape '}' with '[}]'?)"
|
||||
}
|
||||
Error::UnclosedAlternates => {
|
||||
ErrorKind::UnclosedAlternates => {
|
||||
"unclosed alternate group; missing '}' \
|
||||
(maybe escape '{' with '[{]'?)"
|
||||
}
|
||||
Error::NestedAlternates => {
|
||||
ErrorKind::NestedAlternates => {
|
||||
"nested alternate groups are not allowed"
|
||||
}
|
||||
Error::Regex(ref err) => err,
|
||||
ErrorKind::DanglingEscape => {
|
||||
"dangling '\\'"
|
||||
}
|
||||
ErrorKind::Regex(ref err) => err,
|
||||
ErrorKind::__Nonexhaustive => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self.glob {
|
||||
None => self.kind.fmt(f),
|
||||
Some(ref glob) => {
|
||||
write!(f, "error parsing glob '{}': {}", glob, self.kind)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ErrorKind {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match *self {
|
||||
Error::InvalidRecursive
|
||||
| Error::UnclosedClass
|
||||
| Error::UnopenedAlternates
|
||||
| Error::UnclosedAlternates
|
||||
| Error::NestedAlternates
|
||||
| Error::Regex(_) => {
|
||||
ErrorKind::InvalidRecursive
|
||||
| ErrorKind::UnclosedClass
|
||||
| ErrorKind::UnopenedAlternates
|
||||
| ErrorKind::UnclosedAlternates
|
||||
| ErrorKind::NestedAlternates
|
||||
| ErrorKind::DanglingEscape
|
||||
| ErrorKind::Regex(_) => {
|
||||
write!(f, "{}", self.description())
|
||||
}
|
||||
Error::InvalidRange(s, e) => {
|
||||
ErrorKind::InvalidRange(s, e) => {
|
||||
write!(f, "invalid range; '{}' > '{}'", s, e)
|
||||
}
|
||||
ErrorKind::__Nonexhaustive => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -201,12 +259,22 @@ fn new_regex(pat: &str) -> Result<Regex, Error> {
|
||||
.size_limit(10 * (1 << 20))
|
||||
.dfa_size_limit(10 * (1 << 20))
|
||||
.build()
|
||||
.map_err(|err| Error::Regex(err.to_string()))
|
||||
.map_err(|err| {
|
||||
Error {
|
||||
glob: Some(pat.to_string()),
|
||||
kind: ErrorKind::Regex(err.to_string()),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn new_regex_set<I, S>(pats: I) -> Result<RegexSet, Error>
|
||||
where S: AsRef<str>, I: IntoIterator<Item=S> {
|
||||
RegexSet::new(pats).map_err(|err| Error::Regex(err.to_string()))
|
||||
RegexSet::new(pats).map_err(|err| {
|
||||
Error {
|
||||
glob: None,
|
||||
kind: ErrorKind::Regex(err.to_string()),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
type Fnv = hash::BuildHasherDefault<fnv::FnvHasher>;
|
||||
@@ -364,8 +432,8 @@ impl GlobSet {
|
||||
GlobSetMatchStrategy::Suffix(suffixes.suffix()),
|
||||
GlobSetMatchStrategy::Prefix(prefixes.prefix()),
|
||||
GlobSetMatchStrategy::RequiredExtension(
|
||||
try!(required_exts.build())),
|
||||
GlobSetMatchStrategy::Regex(try!(regexes.regex_set())),
|
||||
required_exts.build()?),
|
||||
GlobSetMatchStrategy::Regex(regexes.regex_set()?),
|
||||
],
|
||||
})
|
||||
}
|
||||
@@ -373,6 +441,7 @@ impl GlobSet {
|
||||
|
||||
/// GlobSetBuilder builds a group of patterns that can be used to
|
||||
/// simultaneously match a file path.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct GlobSetBuilder {
|
||||
pats: Vec<Glob>,
|
||||
}
|
||||
@@ -410,7 +479,7 @@ impl GlobSetBuilder {
|
||||
pub struct Candidate<'a> {
|
||||
path: Cow<'a, [u8]>,
|
||||
basename: Cow<'a, [u8]>,
|
||||
ext: &'a OsStr,
|
||||
ext: Cow<'a, [u8]>,
|
||||
}
|
||||
|
||||
impl<'a> Candidate<'a> {
|
||||
@@ -421,7 +490,7 @@ impl<'a> Candidate<'a> {
|
||||
Candidate {
|
||||
path: normalize_path(path_bytes(path)),
|
||||
basename: os_str_bytes(basename),
|
||||
ext: file_name_ext(basename).unwrap_or(OsStr::new("")),
|
||||
ext: file_name_ext(basename).unwrap_or(Cow::Borrowed(b"")),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -536,22 +605,22 @@ impl BasenameLiteralStrategy {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct ExtensionStrategy(HashMap<OsString, Vec<usize>, Fnv>);
|
||||
struct ExtensionStrategy(HashMap<Vec<u8>, Vec<usize>, Fnv>);
|
||||
|
||||
impl ExtensionStrategy {
|
||||
fn new() -> ExtensionStrategy {
|
||||
ExtensionStrategy(HashMap::with_hasher(Fnv::default()))
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, ext: OsString) {
|
||||
self.0.entry(ext).or_insert(vec![]).push(global_index);
|
||||
fn add(&mut self, global_index: usize, ext: String) {
|
||||
self.0.entry(ext.into_bytes()).or_insert(vec![]).push(global_index);
|
||||
}
|
||||
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
if candidate.ext.is_empty() {
|
||||
return false;
|
||||
}
|
||||
self.0.contains_key(candidate.ext)
|
||||
self.0.contains_key(&*candidate.ext)
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
@@ -559,7 +628,7 @@ impl ExtensionStrategy {
|
||||
if candidate.ext.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(hits) = self.0.get(candidate.ext) {
|
||||
if let Some(hits) = self.0.get(&*candidate.ext) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
}
|
||||
@@ -622,14 +691,14 @@ impl SuffixStrategy {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct RequiredExtensionStrategy(HashMap<OsString, Vec<(usize, Regex)>, Fnv>);
|
||||
struct RequiredExtensionStrategy(HashMap<Vec<u8>, Vec<(usize, Regex)>, Fnv>);
|
||||
|
||||
impl RequiredExtensionStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
if candidate.ext.is_empty() {
|
||||
return false;
|
||||
}
|
||||
match self.0.get(candidate.ext) {
|
||||
match self.0.get(&*candidate.ext) {
|
||||
None => false,
|
||||
Some(regexes) => {
|
||||
for &(_, ref re) in regexes {
|
||||
@@ -647,7 +716,7 @@ impl RequiredExtensionStrategy {
|
||||
if candidate.ext.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(regexes) = self.0.get(candidate.ext) {
|
||||
if let Some(regexes) = self.0.get(&*candidate.ext) {
|
||||
for &(global_index, ref re) in regexes {
|
||||
if re.is_match(&*candidate.path) {
|
||||
matches.push(global_index);
|
||||
@@ -719,7 +788,7 @@ impl MultiStrategyBuilder {
|
||||
|
||||
fn regex_set(self) -> Result<RegexSetStrategy, Error> {
|
||||
Ok(RegexSetStrategy {
|
||||
matcher: try!(new_regex_set(self.literals)),
|
||||
matcher: new_regex_set(self.literals)?,
|
||||
map: self.map,
|
||||
})
|
||||
}
|
||||
@@ -727,7 +796,7 @@ impl MultiStrategyBuilder {
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct RequiredExtensionStrategyBuilder(
|
||||
HashMap<OsString, Vec<(usize, String)>>,
|
||||
HashMap<Vec<u8>, Vec<(usize, String)>>,
|
||||
);
|
||||
|
||||
impl RequiredExtensionStrategyBuilder {
|
||||
@@ -735,8 +804,11 @@ impl RequiredExtensionStrategyBuilder {
|
||||
RequiredExtensionStrategyBuilder(HashMap::new())
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, ext: OsString, regex: String) {
|
||||
self.0.entry(ext).or_insert(vec![]).push((global_index, regex));
|
||||
fn add(&mut self, global_index: usize, ext: String, regex: String) {
|
||||
self.0
|
||||
.entry(ext.into_bytes())
|
||||
.or_insert(vec![])
|
||||
.push((global_index, regex));
|
||||
}
|
||||
|
||||
fn build(self) -> Result<RequiredExtensionStrategy, Error> {
|
||||
@@ -744,7 +816,7 @@ impl RequiredExtensionStrategyBuilder {
|
||||
for (ext, regexes) in self.0.into_iter() {
|
||||
exts.insert(ext.clone(), vec![]);
|
||||
for (global_index, regex) in regexes {
|
||||
let compiled = try!(new_regex(®ex));
|
||||
let compiled = new_regex(®ex)?;
|
||||
exts.get_mut(&ext).unwrap().push((global_index, compiled));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -54,34 +54,28 @@ pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
|
||||
/// a pattern like `*.rs` is obviously trying to match files with a `rs`
|
||||
/// extension, but it also matches files like `.rs`, which doesn't have an
|
||||
/// extension according to std::path::Path::extension.
|
||||
pub fn file_name_ext(name: &OsStr) -> Option<&OsStr> {
|
||||
// Yes, these functions are awful, and yes, we are completely violating
|
||||
// the abstraction barrier of std::ffi. The barrier we're violating is
|
||||
// that an OsStr's encoding is *ASCII compatible*. While this is obviously
|
||||
// true on Unix systems, it's also true on Windows because an OsStr uses
|
||||
// WTF-8 internally: https://simonsapin.github.io/wtf-8/
|
||||
//
|
||||
// We should consider doing the same for the other path utility functions.
|
||||
// Right now, we don't break any barriers, but Windows users are paying
|
||||
// for it.
|
||||
//
|
||||
// Got any better ideas that don't cost anything? Hit me up. ---AG
|
||||
unsafe fn os_str_as_u8_slice(s: &OsStr) -> &[u8] {
|
||||
::std::mem::transmute(s)
|
||||
}
|
||||
unsafe fn u8_slice_as_os_str(s: &[u8]) -> &OsStr {
|
||||
::std::mem::transmute(s)
|
||||
}
|
||||
pub fn file_name_ext(name: &OsStr) -> Option<Cow<[u8]>> {
|
||||
if name.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let name = unsafe { os_str_as_u8_slice(name) };
|
||||
for (i, &b) in name.iter().enumerate().rev() {
|
||||
if b == b'.' {
|
||||
return Some(unsafe { u8_slice_as_os_str(&name[i..]) });
|
||||
let name = os_str_bytes(name);
|
||||
let last_dot_at = {
|
||||
let result = name
|
||||
.iter().enumerate().rev()
|
||||
.find(|&(_, &b)| b == b'.')
|
||||
.map(|(i, _)| i);
|
||||
match result {
|
||||
None => return None,
|
||||
Some(i) => i,
|
||||
}
|
||||
};
|
||||
Some(match name {
|
||||
Cow::Borrowed(name) => Cow::Borrowed(&name[last_dot_at..]),
|
||||
Cow::Owned(mut name) => {
|
||||
name.drain(..last_dot_at);
|
||||
Cow::Owned(name)
|
||||
}
|
||||
None
|
||||
})
|
||||
}
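For reference, the rule implemented above (and exercised by the tests below) is "everything from the last dot onward", so `.rs` reports `.rs` even though `std::path::Path::extension` would say it has no extension. A rough standalone rendering of the same rule on plain byte slices, for illustration only:

```rust
// Simplified sketch of the same rule; the real function returns Cow<[u8]>
// because Windows paths may need transcoding to UTF-8 first.
fn ext_bytes(name: &[u8]) -> Option<&[u8]> {
    let last_dot = name.iter().rposition(|&b| b == b'.')?;
    Some(&name[last_dot..])
}

fn main() {
    assert_eq!(ext_bytes(b"main.rs"), Some(&b".rs"[..]));
    assert_eq!(ext_bytes(b".rs"), Some(&b".rs"[..]));
    assert_eq!(ext_bytes(b"Makefile"), None);
}
```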
|
||||
|
||||
/// Return raw bytes of a path, transcoded to UTF-8 if necessary.
|
||||
@@ -144,7 +138,7 @@ mod tests {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let got = file_name_ext(OsStr::new($file_name));
|
||||
assert_eq!($ext.map(OsStr::new), got);
|
||||
assert_eq!($ext.map(|s| Cow::Borrowed(s.as_bytes())), got);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
[package]
name = "grep"
version = "0.1.6" #:version
version = "0.1.8" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
Fast line oriented regex searching as a library.
@@ -13,7 +13,7 @@ keywords = ["regex", "grep", "egrep", "search", "pattern"]
license = "Unlicense/MIT"

[dependencies]
log = "0.3"
memchr = "1"
regex = "0.2.1"
regex-syntax = "0.4.0"
log = "0.4"
memchr = "2"
regex = "0.2.9"
regex-syntax = "0.5.3"

@@ -19,6 +19,7 @@ pub use search::{Grep, GrepBuilder, Iter, Match};
|
||||
mod literals;
|
||||
mod nonl;
|
||||
mod search;
|
||||
mod smart_case;
|
||||
mod word_boundary;
|
||||
|
||||
/// Result is a convenient type alias that fixes the type of the error to
|
||||
|
||||
@@ -10,10 +10,8 @@ principled.
|
||||
use std::cmp;
|
||||
|
||||
use regex::bytes::RegexBuilder;
|
||||
use syntax::{
|
||||
Expr, Literals, Lit,
|
||||
ByteClass, ByteRange, CharClass, ClassRange, Repeater,
|
||||
};
|
||||
use syntax::hir::{self, Hir, HirKind};
|
||||
use syntax::hir::literal::{Literal, Literals};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct LiteralSets {
|
||||
@@ -23,12 +21,12 @@ pub struct LiteralSets {
|
||||
}
|
||||
|
||||
impl LiteralSets {
|
||||
pub fn create(expr: &Expr) -> Self {
|
||||
pub fn create(expr: &Hir) -> Self {
|
||||
let mut required = Literals::empty();
|
||||
union_required(expr, &mut required);
|
||||
LiteralSets {
|
||||
prefixes: expr.prefixes(),
|
||||
suffixes: expr.suffixes(),
|
||||
prefixes: Literals::prefixes(expr),
|
||||
suffixes: Literals::suffixes(expr),
|
||||
required: required,
|
||||
}
|
||||
}
|
||||
@@ -93,60 +91,52 @@ impl LiteralSets {
|
||||
}
|
||||
}
|
||||
|
||||
fn union_required(expr: &Expr, lits: &mut Literals) {
|
||||
use syntax::Expr::*;
|
||||
match *expr {
|
||||
Literal { ref chars, casei: false } => {
|
||||
let s: String = chars.iter().cloned().collect();
|
||||
lits.cross_add(s.as_bytes());
|
||||
fn union_required(expr: &Hir, lits: &mut Literals) {
|
||||
match *expr.kind() {
|
||||
HirKind::Literal(hir::Literal::Unicode(c)) => {
|
||||
let mut buf = [0u8; 4];
|
||||
lits.cross_add(c.encode_utf8(&mut buf).as_bytes());
|
||||
}
|
||||
Literal { ref chars, casei: true } => {
|
||||
for &c in chars {
|
||||
let cls = CharClass::new(vec![
|
||||
ClassRange { start: c, end: c },
|
||||
]).case_fold();
|
||||
if !lits.add_char_class(&cls) {
|
||||
lits.cut();
|
||||
return;
|
||||
HirKind::Literal(hir::Literal::Byte(b)) => {
|
||||
lits.cross_add(&[b]);
|
||||
}
|
||||
}
|
||||
}
|
||||
LiteralBytes { ref bytes, casei: false } => {
|
||||
lits.cross_add(bytes);
|
||||
}
|
||||
LiteralBytes { ref bytes, casei: true } => {
|
||||
for &b in bytes {
|
||||
let cls = ByteClass::new(vec![
|
||||
ByteRange { start: b, end: b },
|
||||
]).case_fold();
|
||||
if !lits.add_byte_class(&cls) {
|
||||
lits.cut();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
Class(_) => {
|
||||
HirKind::Class(hir::Class::Unicode(ref cls)) => {
|
||||
if count_unicode_class(cls) >= 5 || !lits.add_char_class(cls) {
|
||||
lits.cut();
|
||||
}
|
||||
ClassBytes(_) => {
|
||||
}
|
||||
HirKind::Class(hir::Class::Bytes(ref cls)) => {
|
||||
if count_byte_class(cls) >= 5 || !lits.add_byte_class(cls) {
|
||||
lits.cut();
|
||||
}
|
||||
Group { ref e, .. } => {
|
||||
union_required(&**e, lits);
|
||||
}
|
||||
Repeat { r: Repeater::ZeroOrOne, .. } => lits.cut(),
|
||||
Repeat { r: Repeater::ZeroOrMore, .. } => lits.cut(),
|
||||
Repeat { ref e, r: Repeater::OneOrMore, .. } => {
|
||||
union_required(&**e, lits);
|
||||
HirKind::Group(hir::Group { ref hir, .. }) => {
|
||||
union_required(&**hir, lits);
|
||||
}
|
||||
HirKind::Repetition(ref x) => {
|
||||
match x.kind {
|
||||
hir::RepetitionKind::ZeroOrOne => lits.cut(),
|
||||
hir::RepetitionKind::ZeroOrMore => lits.cut(),
|
||||
hir::RepetitionKind::OneOrMore => {
|
||||
union_required(&x.hir, lits);
|
||||
lits.cut();
|
||||
}
|
||||
Repeat { ref e, r: Repeater::Range { min, max }, greedy } => {
|
||||
hir::RepetitionKind::Range(ref rng) => {
|
||||
let (min, max) = match *rng {
|
||||
hir::RepetitionRange::Exactly(m) => (m, Some(m)),
|
||||
hir::RepetitionRange::AtLeast(m) => (m, None),
|
||||
hir::RepetitionRange::Bounded(m, n) => (m, Some(n)),
|
||||
};
|
||||
repeat_range_literals(
|
||||
&**e, min, max, greedy, lits, union_required);
|
||||
&x.hir, min, max, x.greedy, lits, union_required);
|
||||
}
|
||||
Concat(ref es) if es.is_empty() => {}
|
||||
Concat(ref es) if es.len() == 1 => union_required(&es[0], lits),
|
||||
Concat(ref es) => {
|
||||
}
|
||||
}
|
||||
HirKind::Concat(ref es) if es.is_empty() => {}
|
||||
HirKind::Concat(ref es) if es.len() == 1 => {
|
||||
union_required(&es[0], lits)
|
||||
}
|
||||
HirKind::Concat(ref es) => {
|
||||
for e in es {
|
||||
let mut lits2 = lits.to_empty();
|
||||
union_required(e, &mut lits2);
|
||||
@@ -157,7 +147,6 @@ fn union_required(expr: &Expr, lits: &mut Literals) {
|
||||
if lits2.contains_empty() {
|
||||
lits.cut();
|
||||
}
|
||||
// if !lits.union(lits2) {
|
||||
if !lits.cross_product(&lits2) {
|
||||
// If this expression couldn't yield any literal that
|
||||
// could be extended, then we need to quit. Since we're
|
||||
@@ -167,15 +156,15 @@ fn union_required(expr: &Expr, lits: &mut Literals) {
|
||||
}
|
||||
}
|
||||
}
|
||||
Alternate(ref es) => {
|
||||
HirKind::Alternation(ref es) => {
|
||||
alternate_literals(es, lits, union_required);
|
||||
}
|
||||
_ => lits.cut(),
|
||||
}
|
||||
}
|
||||
|
||||
fn repeat_range_literals<F: FnMut(&Expr, &mut Literals)>(
|
||||
e: &Expr,
|
||||
fn repeat_range_literals<F: FnMut(&Hir, &mut Literals)>(
|
||||
e: &Hir,
|
||||
min: u32,
|
||||
max: Option<u32>,
|
||||
_greedy: bool,
|
||||
@@ -204,8 +193,8 @@ fn repeat_range_literals<F: FnMut(&Expr, &mut Literals)>(
|
||||
}
|
||||
}
|
||||
|
||||
fn alternate_literals<F: FnMut(&Expr, &mut Literals)>(
|
||||
es: &[Expr],
|
||||
fn alternate_literals<F: FnMut(&Hir, &mut Literals)>(
|
||||
es: &[Hir],
|
||||
lits: &mut Literals,
|
||||
mut f: F,
|
||||
) {
|
||||
@@ -234,11 +223,21 @@ fn alternate_literals<F: FnMut(&Expr, &mut Literals)>(
|
||||
}
|
||||
lits.cut();
|
||||
if !lcs.is_empty() {
|
||||
lits.add(Lit::empty());
|
||||
lits.add(Lit::new(lcs.to_vec()));
|
||||
lits.add(Literal::empty());
|
||||
lits.add(Literal::new(lcs.to_vec()));
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the number of characters in the given class.
|
||||
fn count_unicode_class(cls: &hir::ClassUnicode) -> u32 {
|
||||
cls.iter().map(|r| 1 + (r.end() as u32 - r.start() as u32)).sum()
|
||||
}
|
||||
|
||||
/// Return the number of bytes in the given class.
|
||||
fn count_byte_class(cls: &hir::ClassBytes) -> u32 {
|
||||
cls.iter().map(|r| 1 + (r.end() as u32 - r.start() as u32)).sum()
|
||||
}
|
||||
|
||||
/// Converts an arbitrary sequence of bytes to a literal suitable for building
|
||||
/// a regular expression.
|
||||
fn bytes_to_regex(bs: &[u8]) -> String {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use syntax::Expr;
|
||||
use syntax::hir::{self, Hir, HirKind};
|
||||
|
||||
use {Error, Result};
|
||||
|
||||
@@ -9,61 +9,66 @@ use {Error, Result};
|
||||
///
|
||||
/// If `byte` is not an ASCII character (i.e., greater than `0x7F`), then this
|
||||
/// function panics.
|
||||
pub fn remove(expr: Expr, byte: u8) -> Result<Expr> {
|
||||
// TODO(burntsushi): There is a bug in this routine where only `\n` is
|
||||
// handled correctly. Namely, `AnyChar` and `AnyByte` need to be translated
|
||||
// to proper character classes instead of the special `AnyCharNoNL` and
|
||||
// `AnyByteNoNL` classes.
|
||||
use syntax::Expr::*;
|
||||
pub fn remove(expr: Hir, byte: u8) -> Result<Hir> {
|
||||
assert!(byte <= 0x7F);
|
||||
let chr = byte as char;
|
||||
assert!(chr.len_utf8() == 1);
|
||||
|
||||
Ok(match expr {
|
||||
Literal { chars, casei } => {
|
||||
if chars.iter().position(|&c| c == chr).is_some() {
|
||||
Ok(match expr.into_kind() {
|
||||
HirKind::Empty => Hir::empty(),
|
||||
HirKind::Literal(hir::Literal::Unicode(c)) => {
|
||||
if c == chr {
|
||||
return Err(Error::LiteralNotAllowed(chr));
|
||||
}
|
||||
Literal { chars: chars, casei: casei }
|
||||
Hir::literal(hir::Literal::Unicode(c))
|
||||
}
|
||||
LiteralBytes { bytes, casei } => {
|
||||
if bytes.iter().position(|&b| b == byte).is_some() {
|
||||
HirKind::Literal(hir::Literal::Byte(b)) => {
|
||||
if b as char == chr {
|
||||
return Err(Error::LiteralNotAllowed(chr));
|
||||
}
|
||||
LiteralBytes { bytes: bytes, casei: casei }
|
||||
Hir::literal(hir::Literal::Byte(b))
|
||||
}
|
||||
AnyChar => AnyCharNoNL,
|
||||
AnyByte => AnyByteNoNL,
|
||||
Class(mut cls) => {
|
||||
cls.remove(chr);
|
||||
Class(cls)
|
||||
HirKind::Class(hir::Class::Unicode(mut cls)) => {
|
||||
let remove = hir::ClassUnicode::new(Some(
|
||||
hir::ClassUnicodeRange::new(chr, chr),
|
||||
));
|
||||
cls.difference(&remove);
|
||||
if cls.iter().next().is_none() {
|
||||
return Err(Error::LiteralNotAllowed(chr));
|
||||
}
|
||||
ClassBytes(mut cls) => {
|
||||
cls.remove(byte);
|
||||
ClassBytes(cls)
|
||||
Hir::class(hir::Class::Unicode(cls))
|
||||
}
|
||||
Group { e, i, name } => {
|
||||
Group {
|
||||
e: Box::new(try!(remove(*e, byte))),
|
||||
i: i,
|
||||
name: name,
|
||||
HirKind::Class(hir::Class::Bytes(mut cls)) => {
|
||||
let remove = hir::ClassBytes::new(Some(
|
||||
hir::ClassBytesRange::new(byte, byte),
|
||||
));
|
||||
cls.difference(&remove);
|
||||
if cls.iter().next().is_none() {
|
||||
return Err(Error::LiteralNotAllowed(chr));
|
||||
}
|
||||
Hir::class(hir::Class::Bytes(cls))
|
||||
}
|
||||
Repeat { e, r, greedy } => {
|
||||
Repeat {
|
||||
e: Box::new(try!(remove(*e, byte))),
|
||||
r: r,
|
||||
greedy: greedy,
|
||||
HirKind::Anchor(x) => Hir::anchor(x),
|
||||
HirKind::WordBoundary(x) => Hir::word_boundary(x),
|
||||
HirKind::Repetition(mut x) => {
|
||||
x.hir = Box::new(remove(*x.hir, byte)?);
|
||||
Hir::repetition(x)
|
||||
}
|
||||
HirKind::Group(mut x) => {
|
||||
x.hir = Box::new(remove(*x.hir, byte)?);
|
||||
Hir::group(x)
|
||||
}
|
||||
Concat(exprs) => {
|
||||
Concat(try!(
|
||||
exprs.into_iter().map(|e| remove(e, byte)).collect()))
|
||||
HirKind::Concat(xs) => {
|
||||
let xs = xs.into_iter()
|
||||
.map(|e| remove(e, byte))
|
||||
.collect::<Result<Vec<Hir>>>()?;
|
||||
Hir::concat(xs)
|
||||
}
|
||||
Alternate(exprs) => {
|
||||
Alternate(try!(
|
||||
exprs.into_iter().map(|e| remove(e, byte)).collect()))
|
||||
HirKind::Alternation(xs) => {
|
||||
let xs = xs.into_iter()
|
||||
.map(|e| remove(e, byte))
|
||||
.collect::<Result<Vec<Hir>>>()?;
|
||||
Hir::alternation(xs)
|
||||
}
|
||||
e => e,
|
||||
})
|
||||
}
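The class handling above is the interesting part of the port: instead of `cls.remove(chr)`, the HIR API expresses removal as set difference, and an emptied class is now reported as an error. A small hedged sketch of just that step against regex-syntax 0.5 (the version this diff pins); it does not reproduce the crate-private `remove` itself:

```rust
extern crate regex_syntax;

use regex_syntax::hir::{ClassUnicode, ClassUnicodeRange};

fn main() {
    // A class equivalent to [a-z\n].
    let mut cls = ClassUnicode::new(vec![
        ClassUnicodeRange::new('a', 'z'),
        ClassUnicodeRange::new('\n', '\n'),
    ]);
    // Subtract the line terminator, mirroring `cls.difference(&remove)` above.
    let nl = ClassUnicode::new(Some(ClassUnicodeRange::new('\n', '\n')));
    cls.difference(&nl);
    // Only a-z remains; an empty result would be Error::LiteralNotAllowed
    // in the real code.
    assert!(cls.iter().next().is_some());
}
```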
|
||||
|
||||
@@ -1,10 +1,11 @@
use memchr::{memchr, memrchr};
use syntax::ParserBuilder;
use syntax::hir::Hir;
use regex::bytes::{Regex, RegexBuilder};
use syntax;

use literals::LiteralSets;
use nonl;
use syntax::Expr;
use smart_case::Cased;
use word_boundary::strip_unicode_word_boundaries;
use Result;


@@ -102,9 +103,9 @@ impl GrepBuilder {
|
||||
|
||||
/// Whether to enable smart case search or not (disabled by default).
|
||||
///
|
||||
/// Smart case uses case insensitive search if the regex is contains all
|
||||
/// lowercase literal characters. Otherwise, a case sensitive search is
|
||||
/// used instead.
|
||||
/// Smart case uses case insensitive search if the pattern contains only
|
||||
/// lowercase characters (ignoring any characters which immediately follow
|
||||
/// a '\'). Otherwise, a case sensitive search is used instead.
|
||||
///
|
||||
/// Enabling the case_insensitive flag overrides this.
|
||||
pub fn case_smart(mut self, yes: bool) -> GrepBuilder {
|
||||
@@ -141,11 +142,11 @@ impl GrepBuilder {
|
||||
/// If there was a problem parsing or compiling the regex with the given
|
||||
/// options, then an error is returned.
|
||||
pub fn build(self) -> Result<Grep> {
|
||||
let expr = try!(self.parse());
|
||||
let expr = self.parse()?;
|
||||
let literals = LiteralSets::create(&expr);
|
||||
let re = try!(self.regex(&expr));
|
||||
let re = self.regex(&expr)?;
|
||||
let required = match literals.to_regex_builder() {
|
||||
Some(builder) => Some(try!(self.regex_build(builder))),
|
||||
Some(builder) => Some(self.regex_build(builder)?),
|
||||
None => {
|
||||
match strip_unicode_word_boundaries(&expr) {
|
||||
None => None,
|
||||
@@ -166,7 +167,7 @@ impl GrepBuilder {
|
||||
|
||||
/// Creates a new regex from the given expression with the current
|
||||
/// configuration.
|
||||
fn regex(&self, expr: &Expr) -> Result<Regex> {
|
||||
fn regex(&self, expr: &Hir) -> Result<Regex> {
|
||||
let mut builder = RegexBuilder::new(&expr.to_string());
|
||||
builder.unicode(true);
|
||||
self.regex_build(builder)
|
||||
@@ -184,21 +185,20 @@ impl GrepBuilder {
|
||||
|
||||
/// Parses the underlying pattern and ensures the pattern can never match
|
||||
/// the line terminator.
|
||||
fn parse(&self) -> Result<syntax::Expr> {
|
||||
let expr =
|
||||
try!(syntax::ExprBuilder::new()
|
||||
.allow_bytes(true)
|
||||
.unicode(true)
|
||||
.case_insensitive(try!(self.is_case_insensitive()))
|
||||
.parse(&self.pattern));
|
||||
let expr = try!(nonl::remove(expr, self.opts.line_terminator));
|
||||
debug!("regex ast:\n{:#?}", expr);
|
||||
fn parse(&self) -> Result<Hir> {
|
||||
let expr = ParserBuilder::new()
|
||||
.allow_invalid_utf8(true)
|
||||
.case_insensitive(self.is_case_insensitive()?)
|
||||
.multi_line(true)
|
||||
.build()
|
||||
.parse(&self.pattern)?;
|
||||
debug!("original regex HIR pattern:\n{}", expr);
|
||||
let expr = nonl::remove(expr, self.opts.line_terminator)?;
|
||||
debug!("transformed regex HIR pattern:\n{}", expr);
|
||||
Ok(expr)
|
||||
}
|
||||
|
||||
/// Determines whether the case insensitive flag should be enabled or not.
|
||||
///
|
||||
/// An error is returned if the regex could not be parsed.
|
||||
fn is_case_insensitive(&self) -> Result<bool> {
|
||||
if self.opts.case_insensitive {
|
||||
return Ok(true);
|
||||
@@ -206,12 +206,11 @@ impl GrepBuilder {
|
||||
if !self.opts.case_smart {
|
||||
return Ok(false);
|
||||
}
|
||||
let expr =
|
||||
try!(syntax::ExprBuilder::new()
|
||||
.allow_bytes(true)
|
||||
.unicode(true)
|
||||
.parse(&self.pattern));
|
||||
Ok(!has_uppercase_literal(&expr))
|
||||
let cased = match Cased::from_pattern(&self.pattern) {
|
||||
None => return Ok(false),
|
||||
Some(cased) => cased,
|
||||
};
|
||||
Ok(cased.any_literal && !cased.any_uppercase)
|
||||
}
|
||||
}
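The new `is_case_insensitive` above reduces smart case to a single expression over the `Cased` analysis. A hedged sketch of that rule as a free function, using the crate-internal `Cased` type added as grep/src/smart_case.rs later in this diff (not a public API):

```rust
fn smart_case_wants_insensitive(pattern: &str) -> bool {
    // Cased::from_pattern returns None for unparseable patterns, in which
    // case the search stays case sensitive.
    match Cased::from_pattern(pattern) {
        None => false,
        Some(c) => c.any_literal && !c.any_uppercase,
    }
}
// e.g. "foo" and r"foo\w" turn on case insensitivity; "Foo" and r"\p{Ll}" do not.
```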
|
||||
|
||||
@@ -317,44 +316,8 @@ impl<'b, 's> Iterator for Iter<'b, 's> {
|
||||
}
|
||||
}
|
||||
|
||||
fn has_uppercase_literal(expr: &Expr) -> bool {
|
||||
use syntax::Expr::*;
|
||||
fn byte_is_upper(b: u8) -> bool { b'A' <= b && b <= b'Z' }
|
||||
match *expr {
|
||||
Literal { ref chars, casei } => {
|
||||
casei || chars.iter().any(|c| c.is_uppercase())
|
||||
}
|
||||
LiteralBytes { ref bytes, casei } => {
|
||||
casei || bytes.iter().any(|&b| byte_is_upper(b))
|
||||
}
|
||||
Class(ref ranges) => {
|
||||
for r in ranges {
|
||||
if r.start.is_uppercase() || r.end.is_uppercase() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
ClassBytes(ref ranges) => {
|
||||
for r in ranges {
|
||||
if byte_is_upper(r.start) || byte_is_upper(r.end) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
Group { ref e, .. } => has_uppercase_literal(e),
|
||||
Repeat { ref e, .. } => has_uppercase_literal(e),
|
||||
Concat(ref es) => es.iter().any(has_uppercase_literal),
|
||||
Alternate(ref es) => es.iter().any(has_uppercase_literal),
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#![allow(unused_imports)]
|
||||
|
||||
use memchr::{memchr, memrchr};
|
||||
use regex::bytes::Regex;
|
||||
|
||||
@@ -362,11 +325,6 @@ mod tests {
|
||||
|
||||
static SHERLOCK: &'static [u8] = include_bytes!("./data/sherlock.txt");
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn s(bytes: &[u8]) -> String {
|
||||
String::from_utf8(bytes.to_vec()).unwrap()
|
||||
}
|
||||
|
||||
fn find_lines(pat: &str, haystack: &[u8]) -> Vec<Match> {
|
||||
let re = Regex::new(pat).unwrap();
|
||||
let mut lines = vec![];
|
||||
|
||||
grep/src/smart_case.rs (new file, 191 lines)
@@ -0,0 +1,191 @@
|
||||
use syntax::ast::{self, Ast};
|
||||
use syntax::ast::parse::Parser;
|
||||
|
||||
/// The results of analyzing a regex for cased literals.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct Cased {
|
||||
/// True if and only if a literal uppercase character occurs in the regex.
|
||||
///
|
||||
/// A regex like `\pL` contains no uppercase literals, even though `L`
|
||||
/// is uppercase and the `\pL` class contains uppercase characters.
|
||||
pub any_uppercase: bool,
|
||||
/// True if and only if the regex contains any literal at all. A regex like
|
||||
/// `\pL` has this set to false.
|
||||
pub any_literal: bool,
|
||||
}
|
||||
|
||||
impl Cased {
|
||||
/// Returns a `Cased` value by doing analysis on the AST of `pattern`.
|
||||
///
|
||||
/// If `pattern` is not a valid regular expression, then `None` is
|
||||
/// returned.
|
||||
pub fn from_pattern(pattern: &str) -> Option<Cased> {
|
||||
Parser::new()
|
||||
.parse(pattern)
|
||||
.map(|ast| Cased::from_ast(&ast))
|
||||
.ok()
|
||||
}
|
||||
|
||||
fn from_ast(ast: &Ast) -> Cased {
|
||||
let mut cased = Cased::default();
|
||||
cased.from_ast_impl(ast);
|
||||
cased
|
||||
}
|
||||
|
||||
fn from_ast_impl(&mut self, ast: &Ast) {
|
||||
if self.done() {
|
||||
return;
|
||||
}
|
||||
match *ast {
|
||||
Ast::Empty(_)
|
||||
| Ast::Flags(_)
|
||||
| Ast::Dot(_)
|
||||
| Ast::Assertion(_)
|
||||
| Ast::Class(ast::Class::Unicode(_))
|
||||
| Ast::Class(ast::Class::Perl(_)) => {}
|
||||
Ast::Literal(ref x) => {
|
||||
self.from_ast_literal(x);
|
||||
}
|
||||
Ast::Class(ast::Class::Bracketed(ref x)) => {
|
||||
self.from_ast_class_set(&x.kind);
|
||||
}
|
||||
Ast::Repetition(ref x) => {
|
||||
self.from_ast_impl(&x.ast);
|
||||
}
|
||||
Ast::Group(ref x) => {
|
||||
self.from_ast_impl(&x.ast);
|
||||
}
|
||||
Ast::Alternation(ref alt) => {
|
||||
for x in &alt.asts {
|
||||
self.from_ast_impl(x);
|
||||
}
|
||||
}
|
||||
Ast::Concat(ref alt) => {
|
||||
for x in &alt.asts {
|
||||
self.from_ast_impl(x);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn from_ast_class_set(&mut self, ast: &ast::ClassSet) {
|
||||
if self.done() {
|
||||
return;
|
||||
}
|
||||
match *ast {
|
||||
ast::ClassSet::Item(ref item) => {
|
||||
self.from_ast_class_set_item(item);
|
||||
}
|
||||
ast::ClassSet::BinaryOp(ref x) => {
|
||||
self.from_ast_class_set(&x.lhs);
|
||||
self.from_ast_class_set(&x.rhs);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn from_ast_class_set_item(&mut self, ast: &ast::ClassSetItem) {
|
||||
if self.done() {
|
||||
return;
|
||||
}
|
||||
match *ast {
|
||||
ast::ClassSetItem::Empty(_)
|
||||
| ast::ClassSetItem::Ascii(_)
|
||||
| ast::ClassSetItem::Unicode(_)
|
||||
| ast::ClassSetItem::Perl(_) => {}
|
||||
ast::ClassSetItem::Literal(ref x) => {
|
||||
self.from_ast_literal(x);
|
||||
}
|
||||
ast::ClassSetItem::Range(ref x) => {
|
||||
self.from_ast_literal(&x.start);
|
||||
self.from_ast_literal(&x.end);
|
||||
}
|
||||
ast::ClassSetItem::Bracketed(ref x) => {
|
||||
self.from_ast_class_set(&x.kind);
|
||||
}
|
||||
ast::ClassSetItem::Union(ref union) => {
|
||||
for x in &union.items {
|
||||
self.from_ast_class_set_item(x);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn from_ast_literal(&mut self, ast: &ast::Literal) {
|
||||
self.any_literal = true;
|
||||
self.any_uppercase = self.any_uppercase || ast.c.is_uppercase();
|
||||
}
|
||||
|
||||
/// Returns true if and only if the attributes can never change no matter
|
||||
/// what other AST it might see.
|
||||
fn done(&self) -> bool {
|
||||
self.any_uppercase && self.any_literal
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn cased(pattern: &str) -> Cased {
|
||||
Cased::from_pattern(pattern).unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn various() {
|
||||
let x = cased("");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(!x.any_literal);
|
||||
|
||||
let x = cased("foo");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased("Foo");
|
||||
assert!(x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased("foO");
|
||||
assert!(x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo\\");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo\w");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo\S");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo\p{Ll}");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo[a-z]");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo[A-Z]");
|
||||
assert!(x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo[\S\t]");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo\\S");
|
||||
assert!(x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"\p{Ll}");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(!x.any_literal);
|
||||
|
||||
let x = cased(r"aBc\w");
|
||||
assert!(x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
use syntax::Expr;
|
||||
use syntax::hir::{self, Hir, HirKind};
|
||||
|
||||
/// Strips Unicode word boundaries from the given expression.
|
||||
///
|
||||
@@ -8,7 +8,7 @@ use syntax::Expr;
|
||||
/// false negatives.
|
||||
///
|
||||
/// If no word boundaries could be stripped, then None is returned.
|
||||
pub fn strip_unicode_word_boundaries(expr: &Expr) -> Option<Expr> {
|
||||
pub fn strip_unicode_word_boundaries(expr: &Hir) -> Option<Hir> {
|
||||
// The real reason we do this is because Unicode word boundaries are the
|
||||
// one thing that Rust's regex DFA engine can't handle. When it sees a
|
||||
// Unicode word boundary among non-ASCII text, it falls back to one of the
|
||||
@@ -16,23 +16,24 @@ pub fn strip_unicode_word_boundaries(expr: &Expr) -> Option<Expr> {
|
||||
// a regex to find candidate matches without a Unicode word boundary. We'll
|
||||
// only then use the full (and slower) regex to confirm a candidate as a
|
||||
// match or not during search.
|
||||
use syntax::Expr::*;
|
||||
|
||||
match *expr {
|
||||
Concat(ref es) if !es.is_empty() => {
|
||||
//
|
||||
// It looks like we only check the outer edges for `\b`? I guess this is
|
||||
// an attempt to optimize for the `-w/--word-regexp` flag? ---AG
|
||||
match *expr.kind() {
|
||||
HirKind::Concat(ref es) if !es.is_empty() => {
|
||||
let first = is_unicode_word_boundary(&es[0]);
|
||||
let last = is_unicode_word_boundary(es.last().unwrap());
|
||||
// Be careful not to strip word boundaries if there are no other
|
||||
// expressions to match.
|
||||
match (first, last) {
|
||||
(true, false) if es.len() > 1 => {
|
||||
Some(Concat(es[1..].to_vec()))
|
||||
Some(Hir::concat(es[1..].to_vec()))
|
||||
}
|
||||
(false, true) if es.len() > 1 => {
|
||||
Some(Concat(es[..es.len() - 1].to_vec()))
|
||||
Some(Hir::concat(es[..es.len() - 1].to_vec()))
|
||||
}
|
||||
(true, true) if es.len() > 2 => {
|
||||
Some(Concat(es[1..es.len() - 1].to_vec()))
|
||||
Some(Hir::concat(es[1..es.len() - 1].to_vec()))
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
@@ -42,13 +43,11 @@ pub fn strip_unicode_word_boundaries(expr: &Expr) -> Option<Expr> {
|
||||
}
|
||||
|
||||
/// Returns true if the given expression is a Unicode word boundary.
|
||||
fn is_unicode_word_boundary(expr: &Expr) -> bool {
|
||||
use syntax::Expr::*;
|
||||
|
||||
match *expr {
|
||||
WordBoundary => true,
|
||||
NotWordBoundary => true,
|
||||
Group { ref e, .. } => is_unicode_word_boundary(e),
|
||||
fn is_unicode_word_boundary(expr: &Hir) -> bool {
|
||||
match *expr.kind() {
|
||||
HirKind::WordBoundary(hir::WordBoundary::Unicode) => true,
|
||||
HirKind::WordBoundary(hir::WordBoundary::UnicodeNegate) => true,
|
||||
HirKind::Group(ref x) => is_unicode_word_boundary(&x.hir),
|
||||
_ => false,
|
||||
}
|
||||
}
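A hedged sketch of how the stripped expression is meant to be used (the helper is crate-private, and `syntax` is this crate's alias for regex-syntax 0.5 as pinned by this diff): parse the pattern, strip the outer `\b`s, and compile the remainder as a cheap candidate filter while the original regex confirms real matches.

```rust
use syntax::ParserBuilder;

fn candidate_pattern(pattern: &str) -> Option<String> {
    let hir = ParserBuilder::new().build().parse(pattern).ok()?;
    // For r"\bfoo\b" this yields roughly "foo"; anything it finds is
    // re-checked with the full regex, so Unicode \b semantics are preserved.
    strip_unicode_word_boundaries(&hir).map(|inner| inner.to_string())
}
```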
|
||||
|
||||
@@ -1,6 +1,6 @@
[package]
name = "ignore"
version = "0.1.8" #:version
version = "0.4.1" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
A fast library for efficiently matching ignore files such as `.gitignore`
@@ -18,20 +18,22 @@ name = "ignore"
bench = false

[dependencies]
crossbeam = "0.2"
globset = { version = "0.1.4", path = "../globset" }
lazy_static = "0.2"
log = "0.3"
memchr = "1"
regex = "0.2.1"
crossbeam = "0.3"
globset = { version = "0.4.0", path = "../globset" }
lazy_static = "1"
log = "0.4"
memchr = "2"
regex = "0.2.9"
same-file = "1"
thread_local = "0.3.2"
walkdir = "1.0.7"
walkdir = "2"

[target.'cfg(windows)'.dependencies.winapi]
version = "0.3"
features = ["std", "winnt"]

[dev-dependencies]
tempdir = "0.3.5"

[features]
simd-accel = ["globset/simd-accel"]

[profile.release]
debug = true

@@ -20,7 +20,7 @@ Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
ignore = "0.1"
|
||||
ignore = "0.3"
|
||||
```
|
||||
|
||||
and this to your crate root:
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
// well.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::ffi::OsString;
|
||||
use std::ffi::{OsString, OsStr};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
@@ -73,13 +73,6 @@ struct IgnoreOptions {
|
||||
git_exclude: bool,
|
||||
}
|
||||
|
||||
impl IgnoreOptions {
|
||||
/// Returns true if at least one type of ignore rules should be matched.
|
||||
fn has_any_ignore_options(&self) -> bool {
|
||||
self.ignore || self.git_global || self.git_ignore || self.git_exclude
|
||||
}
|
||||
}
|
||||
|
||||
/// Ignore is a matcher useful for recursively walking one or more directories.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Ignore(Arc<IgnoreInner>);
|
||||
@@ -109,8 +102,12 @@ struct IgnoreInner {
|
||||
/// The absolute base path of this matcher. Populated only if parent
|
||||
/// directories are added.
|
||||
absolute_base: Option<Arc<PathBuf>>,
|
||||
/// Explicit ignore matchers specified by the caller.
|
||||
/// Explicit global ignore matchers specified by the caller.
|
||||
explicit_ignores: Arc<Vec<Gitignore>>,
|
||||
/// Ignore files used in addition to `.ignore`
|
||||
custom_ignore_filenames: Arc<Vec<OsString>>,
|
||||
/// The matcher for custom ignore files
|
||||
custom_ignore_matcher: Gitignore,
|
||||
/// The matcher for .ignore files.
|
||||
ignore_matcher: Gitignore,
|
||||
/// A global gitignore matcher, usually from $XDG_CONFIG_HOME/git/ignore.
|
||||
@@ -127,7 +124,6 @@ struct IgnoreInner {
|
||||
|
||||
impl Ignore {
|
||||
/// Return the directory path of this matcher.
|
||||
#[allow(dead_code)]
|
||||
pub fn path(&self) -> &Path {
|
||||
&self.0.dir
|
||||
}
|
||||
@@ -211,14 +207,19 @@ impl Ignore {
|
||||
|
||||
/// Like add_child, but takes a full path and returns an IgnoreInner.
|
||||
fn add_child_path(&self, dir: &Path) -> (IgnoreInner, Option<Error>) {
|
||||
static IG_NAMES: &'static [&'static str] = &[".rgignore", ".ignore"];
|
||||
|
||||
let mut errs = PartialErrorBuilder::default();
|
||||
let custom_ig_matcher =
|
||||
{
|
||||
let (m, err) =
|
||||
create_gitignore(&dir, &self.0.custom_ignore_filenames);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
let ig_matcher =
|
||||
if !self.0.opts.ignore {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (m, err) = create_gitignore(&dir, IG_NAMES);
|
||||
let (m, err) = create_gitignore(&dir, &[".ignore"]);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
@@ -247,6 +248,8 @@ impl Ignore {
|
||||
is_absolute_parent: false,
|
||||
absolute_base: self.0.absolute_base.clone(),
|
||||
explicit_ignores: self.0.explicit_ignores.clone(),
|
||||
custom_ignore_filenames: self.0.custom_ignore_filenames.clone(),
|
||||
custom_ignore_matcher: custom_ig_matcher,
|
||||
ignore_matcher: ig_matcher,
|
||||
git_global_matcher: self.0.git_global_matcher.clone(),
|
||||
git_ignore_matcher: gi_matcher,
|
||||
@@ -257,6 +260,15 @@ impl Ignore {
|
||||
(ig, errs.into_error_option())
|
||||
}
|
||||
|
||||
/// Returns true if at least one type of ignore rule should be matched.
|
||||
fn has_any_ignore_rules(&self) -> bool {
|
||||
let opts = self.0.opts;
|
||||
let has_custom_ignore_files = !self.0.custom_ignore_filenames.is_empty();
|
||||
|
||||
opts.ignore || opts.git_global || opts.git_ignore
|
||||
|| opts.git_exclude || has_custom_ignore_files
|
||||
}
|
||||
|
||||
/// Returns a match indicating whether the given file path should be
|
||||
/// ignored or not.
|
||||
///
|
||||
@@ -285,7 +297,7 @@ impl Ignore {
|
||||
}
|
||||
}
|
||||
let mut whitelisted = Match::None;
|
||||
if self.0.opts.has_any_ignore_options() {
|
||||
if self.has_any_ignore_rules() {
|
||||
let mat = self.matched_ignore(path, is_dir);
|
||||
if mat.is_ignore() {
|
||||
return mat;
|
||||
@@ -315,10 +327,15 @@ impl Ignore {
|
||||
path: &Path,
|
||||
is_dir: bool,
|
||||
) -> Match<IgnoreMatch<'a>> {
|
||||
let (mut m_ignore, mut m_gi, mut m_gi_exclude, mut m_explicit) =
|
||||
(Match::None, Match::None, Match::None, Match::None);
|
||||
let (mut m_custom_ignore, mut m_ignore, mut m_gi, mut m_gi_exclude, mut m_explicit) =
|
||||
(Match::None, Match::None, Match::None, Match::None, Match::None);
|
||||
let mut saw_git = false;
|
||||
for ig in self.parents().take_while(|ig| !ig.0.is_absolute_parent) {
|
||||
if m_custom_ignore.is_none() {
|
||||
m_custom_ignore =
|
||||
ig.0.custom_ignore_matcher.matched(path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if m_ignore.is_none() {
|
||||
m_ignore =
|
||||
ig.0.ignore_matcher.matched(path, is_dir)
|
||||
@@ -339,6 +356,11 @@ impl Ignore {
|
||||
if let Some(abs_parent_path) = self.absolute_base() {
|
||||
let path = abs_parent_path.join(path);
|
||||
for ig in self.parents().skip_while(|ig|!ig.0.is_absolute_parent) {
|
||||
if m_custom_ignore.is_none() {
|
||||
m_custom_ignore =
|
||||
ig.0.custom_ignore_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if m_ignore.is_none() {
|
||||
m_ignore =
|
||||
ig.0.ignore_matcher.matched(&path, is_dir)
|
||||
@@ -366,7 +388,7 @@ impl Ignore {
|
||||
let m_global = self.0.git_global_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
|
||||
m_ignore.or(m_gi).or(m_gi_exclude).or(m_global).or(m_explicit)
|
||||
m_custom_ignore.or(m_ignore).or(m_gi).or(m_gi_exclude).or(m_global).or(m_explicit)
|
||||
}
|
||||
|
||||
/// Returns an iterator over parent ignore matchers, including this one.
|
||||
@@ -409,8 +431,10 @@ pub struct IgnoreBuilder {
|
||||
overrides: Arc<Override>,
|
||||
/// A type matcher (default is empty).
|
||||
types: Arc<Types>,
|
||||
/// Explicit ignore matchers.
|
||||
/// Explicit global ignore matchers.
|
||||
explicit_ignores: Vec<Gitignore>,
|
||||
/// Ignore files in addition to .ignore.
|
||||
custom_ignore_filenames: Vec<OsString>,
|
||||
/// Ignore config.
|
||||
opts: IgnoreOptions,
|
||||
}
|
||||
@@ -426,6 +450,7 @@ impl IgnoreBuilder {
|
||||
overrides: Arc::new(Override::empty()),
|
||||
types: Arc::new(Types::empty()),
|
||||
explicit_ignores: vec![],
|
||||
custom_ignore_filenames: vec![],
|
||||
opts: IgnoreOptions {
|
||||
hidden: true,
|
||||
ignore: true,
|
||||
@@ -451,6 +476,7 @@ impl IgnoreBuilder {
|
||||
}
|
||||
gi
|
||||
};
|
||||
|
||||
Ignore(Arc::new(IgnoreInner {
|
||||
compiled: Arc::new(RwLock::new(HashMap::new())),
|
||||
dir: self.dir.clone(),
|
||||
@@ -460,6 +486,8 @@ impl IgnoreBuilder {
|
||||
is_absolute_parent: true,
|
||||
absolute_base: None,
|
||||
explicit_ignores: Arc::new(self.explicit_ignores.clone()),
|
||||
custom_ignore_filenames: Arc::new(self.custom_ignore_filenames.clone()),
|
||||
custom_ignore_matcher: Gitignore::empty(),
|
||||
ignore_matcher: Gitignore::empty(),
|
||||
git_global_matcher: Arc::new(git_global_matcher),
|
||||
git_ignore_matcher: Gitignore::empty(),
|
||||
@@ -495,6 +523,20 @@ impl IgnoreBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a custom ignore file name
|
||||
///
|
||||
/// These ignore files have higher precedence than all other ignore files.
|
||||
///
|
||||
/// When specifying multiple names, earlier names have lower precedence than
|
||||
/// later names.
|
||||
pub fn add_custom_ignore_filename<S: AsRef<OsStr>>(
|
||||
&mut self,
|
||||
file_name: S
|
||||
) -> &mut IgnoreBuilder {
|
||||
self.custom_ignore_filenames.push(file_name.as_ref().to_os_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Enables ignoring hidden files.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
@@ -556,14 +598,14 @@ impl IgnoreBuilder {
|
||||
/// order given (earlier names have lower precedence than later names).
|
||||
///
|
||||
/// I/O errors are ignored.
|
||||
pub fn create_gitignore(
|
||||
pub fn create_gitignore<T: AsRef<OsStr>>(
|
||||
dir: &Path,
|
||||
names: &[&str],
|
||||
names: &[T],
|
||||
) -> (Gitignore, Option<Error>) {
|
||||
let mut builder = GitignoreBuilder::new(dir);
|
||||
let mut errs = PartialErrorBuilder::default();
|
||||
for name in names {
|
||||
let gipath = dir.join(name);
|
||||
let gipath = dir.join(name.as_ref());
|
||||
errs.maybe_push_ignore_io(builder.add(gipath));
|
||||
}
|
||||
let gi = match builder.build() {
|
||||
@@ -656,6 +698,53 @@ mod tests {
|
||||
assert!(ig.matched("baz", false).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn custom_ignore() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let custom_ignore = ".customignore";
|
||||
wfile(td.path().join(custom_ignore), "foo\n!bar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new()
|
||||
.add_custom_ignore_filename(custom_ignore)
|
||||
.build().add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
assert!(ig.matched("foo", false).is_ignore());
|
||||
assert!(ig.matched("bar", false).is_whitelist());
|
||||
assert!(ig.matched("baz", false).is_none());
|
||||
}
|
||||
|
||||
// Tests that a custom ignore file will override an .ignore.
|
||||
#[test]
|
||||
fn custom_ignore_over_ignore() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let custom_ignore = ".customignore";
|
||||
wfile(td.path().join(".ignore"), "foo");
|
||||
wfile(td.path().join(custom_ignore), "!foo");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new()
|
||||
.add_custom_ignore_filename(custom_ignore)
|
||||
.build().add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
assert!(ig.matched("foo", false).is_whitelist());
|
||||
}
|
||||
|
||||
// Tests that earlier custom ignore files have lower precedence than later.
|
||||
#[test]
|
||||
fn custom_ignore_precedence() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let custom_ignore1 = ".customignore1";
|
||||
let custom_ignore2 = ".customignore2";
|
||||
wfile(td.path().join(custom_ignore1), "foo");
|
||||
wfile(td.path().join(custom_ignore2), "!foo");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new()
|
||||
.add_custom_ignore_filename(custom_ignore1)
|
||||
.add_custom_ignore_filename(custom_ignore2)
|
||||
.build().add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
assert!(ig.matched("foo", false).is_whitelist());
|
||||
}
|
||||
|
||||
// Tests that an .ignore will override a .gitignore.
|
||||
#[test]
|
||||
fn ignore_over_gitignore() {
|
||||
|
||||
@@ -66,6 +66,12 @@ impl Glob {
|
||||
pub fn is_only_dir(&self) -> bool {
|
||||
self.is_only_dir
|
||||
}
|
||||
|
||||
/// Returns true if and only if this glob has a `**/` prefix.
|
||||
fn has_doublestar_prefix(&self) -> bool {
|
||||
self.actual.starts_with("**/")
|
||||
|| (self.actual == "**" && self.is_only_dir)
|
||||
}
|
||||
}
|
||||
|
||||
/// Gitignore is a matcher for the globs in one or more gitignore files
|
||||
@@ -169,8 +175,8 @@ impl Gitignore {
|
||||
self.num_whitelists
|
||||
}
|
||||
|
||||
/// Returns whether the given file path matched a pattern in this gitignore
|
||||
/// matcher.
|
||||
/// Returns whether the given path (file or directory) matched a pattern in
|
||||
/// this gitignore matcher.
|
||||
///
|
||||
/// `is_dir` should be true if the path refers to a directory and false
|
||||
/// otherwise.
|
||||
@@ -191,6 +197,48 @@ impl Gitignore {
|
||||
self.matched_stripped(self.strip(path.as_ref()), is_dir)
|
||||
}
|
||||
|
||||
/// Returns whether the given path (file or directory, and expected to be
|
||||
/// under the root) or any of its parent directories (up to the root)
|
||||
/// matched a pattern in this gitignore matcher.
|
||||
///
|
||||
/// NOTE: This method is more expensive than walking the directory hierarchy
|
||||
/// top-to-bottom and matching the entries. But it is easier to use in
/// cases when a list of paths is available without a hierarchy.
|
||||
///
|
||||
/// `is_dir` should be true if the path refers to a directory and false
|
||||
/// otherwise.
|
||||
///
|
||||
/// The given path is matched relative to the path given when building
|
||||
/// the matcher. Specifically, before matching `path`, its prefix (as
|
||||
/// determined by a common suffix of the directory containing this
|
||||
/// gitignore) is stripped. If there is no common suffix/prefix overlap,
|
||||
/// then `path` is assumed to be relative to this matcher.
|
||||
pub fn matched_path_or_any_parents<P: AsRef<Path>>(
|
||||
&self,
|
||||
path: P,
|
||||
is_dir: bool,
|
||||
) -> Match<&Glob> {
|
||||
if self.is_empty() {
|
||||
return Match::None;
|
||||
}
|
||||
let mut path = self.strip(path.as_ref());
|
||||
debug_assert!(
|
||||
!path.has_root(),
|
||||
"path is expect to be under the root"
|
||||
);
|
||||
match self.matched_stripped(path, is_dir) {
|
||||
Match::None => (), // walk up
|
||||
a_match => return a_match,
|
||||
}
|
||||
while let Some(parent) = path.parent() {
|
||||
match self.matched_stripped(parent, /* is_dir */ true) {
|
||||
Match::None => path = parent, // walk up
|
||||
a_match => return a_match,
|
||||
}
|
||||
}
|
||||
Match::None
|
||||
}
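A hedged usage sketch for the new method (the `None` first argument to `add_line` follows the calls made elsewhere in this diff): a path that matches nothing itself is still ignored if one of its parent directories does.

```rust
extern crate ignore;

use ignore::gitignore::GitignoreBuilder;

fn main() {
    let mut builder = GitignoreBuilder::new("/repo");
    builder.add_line(None, "target/").unwrap();
    let gi = builder.build().unwrap();

    // "target/debug/app" does not match "target/" directly, but its parent
    // directory "target" does, which the upward walk discovers.
    assert!(gi.matched_path_or_any_parents("target/debug/app", false).is_ignore());
    assert!(gi.matched_path_or_any_parents("src/main.rs", false).is_none());
}
```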
|
||||
|
||||
/// Like matched, but takes a path that has already been stripped.
|
||||
fn matched_stripped<P: AsRef<Path>>(
|
||||
&self,
|
||||
@@ -236,7 +284,10 @@ impl Gitignore {
|
||||
// BUT, a file name might not have any directory components to it,
|
||||
// in which case, we don't want to accidentally strip any part of the
|
||||
// file name.
|
||||
if !is_file_name(path) {
|
||||
//
|
||||
// As an additional special case, if the root is just `.`, then we
|
||||
// shouldn't try to strip anything, e.g., when path begins with a `.`.
|
||||
if self.root != Path::new(".") && !is_file_name(path) {
|
||||
if let Some(p) = strip_prefix(&self.root, path) {
|
||||
path = p;
|
||||
// If we're left with a leading slash, get rid of it.
|
||||
@@ -250,10 +301,12 @@ impl Gitignore {
|
||||
}
|
||||
|
||||
/// Builds a matcher for a single set of globs from a .gitignore file.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct GitignoreBuilder {
|
||||
builder: GlobSetBuilder,
|
||||
root: PathBuf,
|
||||
globs: Vec<Glob>,
|
||||
case_insensitive: bool,
|
||||
}
|
||||
|
||||
impl GitignoreBuilder {
|
||||
@@ -269,6 +322,7 @@ impl GitignoreBuilder {
|
||||
builder: GlobSetBuilder::new(),
|
||||
root: strip_prefix("./", root).unwrap_or(root).to_path_buf(),
|
||||
globs: vec![],
|
||||
case_insensitive: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -278,8 +332,13 @@ impl GitignoreBuilder {
|
||||
pub fn build(&self) -> Result<Gitignore, Error> {
|
||||
let nignore = self.globs.iter().filter(|g| !g.is_whitelist()).count();
|
||||
let nwhite = self.globs.iter().filter(|g| g.is_whitelist()).count();
|
||||
let set = try!(
|
||||
self.builder.build().map_err(|err| Error::Glob(err.to_string())));
|
||||
let set =
|
||||
self.builder.build().map_err(|err| {
|
||||
Error::Glob {
|
||||
glob: None,
|
||||
err: err.to_string(),
|
||||
}
|
||||
})?;
|
||||
Ok(Gitignore {
|
||||
set: set,
|
||||
root: self.root.clone(),
|
||||
@@ -334,7 +393,7 @@ impl GitignoreBuilder {
|
||||
gitignore: &str,
|
||||
) -> Result<&mut GitignoreBuilder, Error> {
|
||||
for line in gitignore.lines() {
|
||||
try!(self.add_line(from.clone(), line));
|
||||
self.add_line(from.clone(), line)?;
|
||||
}
|
||||
Ok(self)
|
||||
}
|
||||
@@ -367,7 +426,6 @@ impl GitignoreBuilder {
|
||||
is_only_dir: false,
|
||||
};
|
||||
let mut literal_separator = false;
|
||||
let has_slash = line.chars().any(|c| c == '/');
|
||||
let mut is_absolute = false;
|
||||
if line.starts_with("\\!") || line.starts_with("\\#") {
|
||||
line = &line[1..];
|
||||
@@ -398,15 +456,15 @@ impl GitignoreBuilder {
|
||||
// If there is a literal slash, then we note that so that globbing
|
||||
// doesn't let wildcards match slashes.
|
||||
glob.actual = line.to_string();
|
||||
if has_slash {
|
||||
if is_absolute || line.chars().any(|c| c == '/') {
|
||||
literal_separator = true;
|
||||
}
|
||||
// If there was a leading slash, then this is a glob that must
|
||||
// match the entire path name. Otherwise, we should let it match
|
||||
// anywhere, so use a **/ prefix.
|
||||
if !is_absolute {
|
||||
// If there was a slash, then this is a glob that must match the entire
|
||||
// path name. Otherwise, we should let it match anywhere, so use a **/
|
||||
// prefix.
|
||||
if !literal_separator {
|
||||
// ... but only if we don't already have a **/ prefix.
|
||||
if !glob.actual.starts_with("**/") {
|
||||
if !glob.has_doublestar_prefix() {
|
||||
glob.actual = format!("**/{}", glob.actual);
|
||||
}
|
||||
}
|
||||
@@ -416,15 +474,32 @@ impl GitignoreBuilder {
|
||||
if glob.actual.ends_with("/**") {
|
||||
glob.actual = format!("{}/*", glob.actual);
|
||||
}
|
||||
let parsed = try!(
|
||||
let parsed =
|
||||
GlobBuilder::new(&glob.actual)
|
||||
.literal_separator(literal_separator)
|
||||
.case_insensitive(self.case_insensitive)
|
||||
.backslash_escape(true)
|
||||
.build()
|
||||
.map_err(|err| Error::Glob(err.to_string())));
|
||||
.map_err(|err| {
|
||||
Error::Glob {
|
||||
glob: Some(glob.original.clone()),
|
||||
err: err.kind().to_string(),
|
||||
}
|
||||
})?;
|
||||
self.builder.add(parsed);
|
||||
self.globs.push(glob);
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Toggle whether the globs should be matched case insensitively or not.
|
||||
///
|
||||
/// This is disabled by default.
|
||||
pub fn case_insensitive(
|
||||
&mut self, yes: bool
|
||||
) -> Result<&mut GitignoreBuilder, Error> {
|
||||
self.case_insensitive = yes;
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the file path of the current environment's global gitignore file.
|
||||
@@ -552,9 +627,20 @@ mod tests {
|
||||
ignored!(ig25, ROOT, "Cargo.lock", "./tabwriter-bin/Cargo.lock");
|
||||
ignored!(ig26, ROOT, "/foo/bar/baz", "./foo/bar/baz");
|
||||
ignored!(ig27, ROOT, "foo/", "xyz/foo", true);
|
||||
ignored!(ig28, ROOT, "src/*.rs", "src/grep/src/main.rs");
|
||||
ignored!(ig29, "./src", "/llvm/", "./src/llvm", true);
|
||||
ignored!(ig30, ROOT, "node_modules/ ", "node_modules", true);
|
||||
ignored!(ig28, "./src", "/llvm/", "./src/llvm", true);
|
||||
ignored!(ig29, ROOT, "node_modules/ ", "node_modules", true);
|
||||
ignored!(ig30, ROOT, "**/", "foo/bar", true);
|
||||
ignored!(ig31, ROOT, "path1/*", "path1/foo");
|
||||
ignored!(ig32, ROOT, ".a/b", ".a/b");
|
||||
ignored!(ig33, "./", ".a/b", ".a/b");
|
||||
ignored!(ig34, ".", ".a/b", ".a/b");
|
||||
ignored!(ig35, "./.", ".a/b", ".a/b");
|
||||
ignored!(ig36, "././", ".a/b", ".a/b");
|
||||
ignored!(ig37, "././.", ".a/b", ".a/b");
|
||||
ignored!(ig38, ROOT, "\\[", "[");
|
||||
ignored!(ig39, ROOT, "\\?", "?");
|
||||
ignored!(ig40, ROOT, "\\*", "*");
|
||||
ignored!(ig41, ROOT, "\\a", "a");
|
||||
|
||||
not_ignored!(ignot1, ROOT, "amonths", "months");
|
||||
not_ignored!(ignot2, ROOT, "monthsa", "months");
|
||||
@@ -573,6 +659,9 @@ mod tests {
|
||||
ignot14, "./third_party/protobuf", "m4/ltoptions.m4",
|
||||
"./third_party/protobuf/csharp/src/packages/repositories.config");
|
||||
not_ignored!(ignot15, ROOT, "!/bar", "foo/bar");
|
||||
not_ignored!(ignot16, ROOT, "*\n!**/", "foo", true);
|
||||
not_ignored!(ignot17, ROOT, "src/*.rs", "src/grep/src/main.rs");
|
||||
not_ignored!(ignot18, ROOT, "path1/*", "path2/path1/foo");
|
||||
|
||||
fn bytes(s: &str) -> Vec<u8> {
|
||||
s.to_string().into_bytes()
|
||||
@@ -607,4 +696,21 @@ mod tests {
|
||||
fn regression_106() {
|
||||
gi_from_str("/", " ");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn case_insensitive() {
|
||||
let gi = GitignoreBuilder::new(ROOT)
|
||||
.case_insensitive(true).unwrap()
|
||||
.add_str(None, "*.html").unwrap()
|
||||
.build().unwrap();
|
||||
assert!(gi.matched("foo.html", false).is_ignore());
|
||||
assert!(gi.matched("foo.HTML", false).is_ignore());
|
||||
assert!(!gi.matched("foo.htm", false).is_ignore());
|
||||
assert!(!gi.matched("foo.HTM", false).is_ignore());
|
||||
}
|
||||
|
||||
ignored!(cs1, ROOT, "*.html", "foo.html");
|
||||
not_ignored!(cs2, ROOT, "*.html", "foo.HTML");
|
||||
not_ignored!(cs3, ROOT, "*.html", "foo.htm");
|
||||
not_ignored!(cs4, ROOT, "*.html", "foo.HTM");
|
||||
}
|
||||
|
||||
@@ -54,10 +54,13 @@ extern crate lazy_static;
|
||||
extern crate log;
|
||||
extern crate memchr;
|
||||
extern crate regex;
|
||||
extern crate same_file;
|
||||
#[cfg(test)]
|
||||
extern crate tempdir;
|
||||
extern crate thread_local;
|
||||
extern crate walkdir;
|
||||
#[cfg(windows)]
|
||||
extern crate winapi;
|
||||
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
@@ -112,13 +115,61 @@ pub enum Error {
|
||||
/// An error that occurs when doing I/O, such as reading an ignore file.
|
||||
Io(io::Error),
|
||||
/// An error that occurs when trying to parse a glob.
|
||||
Glob(String),
|
||||
Glob {
|
||||
/// The original glob that caused this error. This glob, when
|
||||
/// available, always corresponds to the glob provided by an end user.
|
||||
/// e.g., It is the glob as written in a `.gitignore` file.
|
||||
///
|
||||
/// (This glob may be distinct from the glob that is actually
|
||||
/// compiled, after accounting for `gitignore` semantics.)
|
||||
glob: Option<String>,
|
||||
/// The underlying glob error as a string.
|
||||
err: String,
|
||||
},
|
||||
/// A type selection for a file type that is not defined.
|
||||
UnrecognizedFileType(String),
|
||||
/// A user specified file type definition could not be parsed.
|
||||
InvalidDefinition,
|
||||
}
|
||||
|
||||
impl Clone for Error {
|
||||
fn clone(&self) -> Error {
|
||||
match *self {
|
||||
Error::Partial(ref errs) => Error::Partial(errs.clone()),
|
||||
Error::WithLineNumber { line, ref err } => {
|
||||
Error::WithLineNumber { line: line, err: err.clone() }
|
||||
}
|
||||
Error::WithPath { ref path, ref err } => {
|
||||
Error::WithPath { path: path.clone(), err: err.clone() }
|
||||
}
|
||||
Error::WithDepth { depth, ref err } => {
|
||||
Error::WithDepth { depth: depth, err: err.clone() }
|
||||
}
|
||||
Error::Loop { ref ancestor, ref child } => {
|
||||
Error::Loop {
|
||||
ancestor: ancestor.clone(),
|
||||
child: child.clone()
|
||||
}
|
||||
}
|
||||
Error::Io(ref err) => {
|
||||
match err.raw_os_error() {
|
||||
Some(e) => Error::Io(io::Error::from_raw_os_error(e)),
|
||||
None => {
|
||||
Error::Io(io::Error::new(err.kind(), err.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
Error::Glob { ref glob, ref err } => {
|
||||
Error::Glob { glob: glob.clone(), err: err.clone() }
|
||||
}
|
||||
Error::UnrecognizedFileType(ref err) => {
|
||||
Error::UnrecognizedFileType(err.clone())
|
||||
}
|
||||
Error::InvalidDefinition => Error::InvalidDefinition,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Error {
|
||||
/// Returns true if this is a partial error.
|
||||
///
|
||||
@@ -144,7 +195,7 @@ impl Error {
|
||||
Error::WithDepth { ref err, .. } => err.is_io(),
|
||||
Error::Loop { .. } => false,
|
||||
Error::Io(_) => true,
|
||||
Error::Glob(_) => false,
|
||||
Error::Glob { .. } => false,
|
||||
Error::UnrecognizedFileType(_) => false,
|
||||
Error::InvalidDefinition => false,
|
||||
}
|
||||
@@ -188,6 +239,29 @@ impl Error {
|
||||
}
|
||||
errline.with_path(path)
|
||||
}
|
||||
|
||||
/// Build an error from a walkdir error.
|
||||
fn from_walkdir(err: walkdir::Error) -> Error {
|
||||
let depth = err.depth();
|
||||
if let (Some(anc), Some(child)) = (err.loop_ancestor(), err.path()) {
|
||||
return Error::WithDepth {
|
||||
depth: depth,
|
||||
err: Box::new(Error::Loop {
|
||||
ancestor: anc.to_path_buf(),
|
||||
child: child.to_path_buf(),
|
||||
}),
|
||||
};
|
||||
}
|
||||
let path = err.path().map(|p| p.to_path_buf());
|
||||
let mut ig_err = Error::Io(io::Error::from(err));
|
||||
if let Some(path) = path {
|
||||
ig_err = Error::WithPath {
|
||||
path: path,
|
||||
err: Box::new(ig_err),
|
||||
};
|
||||
}
|
||||
ig_err
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for Error {
|
||||
@@ -199,7 +273,7 @@ impl error::Error for Error {
|
||||
Error::WithDepth { ref err, .. } => err.description(),
|
||||
Error::Loop { .. } => "file system loop found",
|
||||
Error::Io(ref err) => err.description(),
|
||||
Error::Glob(ref msg) => msg,
|
||||
Error::Glob { ref err, .. } => err,
|
||||
Error::UnrecognizedFileType(_) => "unrecognized file type",
|
||||
Error::InvalidDefinition => "invalid definition",
|
||||
}
|
||||
@@ -227,7 +301,10 @@ impl fmt::Display for Error {
|
||||
child.display(), ancestor.display())
|
||||
}
|
||||
Error::Io(ref err) => err.fmt(f),
|
||||
Error::Glob(ref msg) => write!(f, "{}", msg),
|
||||
Error::Glob { glob: None, ref err } => write!(f, "{}", err),
|
||||
Error::Glob { glob: Some(ref glob), ref err } => {
|
||||
write!(f, "error parsing glob '{}': {}", glob, err)
|
||||
}
|
||||
Error::UnrecognizedFileType(ref ty) => {
|
||||
write!(f, "unrecognized file type: {}", ty)
|
||||
}
|
||||
@@ -245,30 +322,6 @@ impl From<io::Error> for Error {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<walkdir::Error> for Error {
|
||||
fn from(err: walkdir::Error) -> Error {
|
||||
let depth = err.depth();
|
||||
if let (Some(anc), Some(child)) = (err.loop_ancestor(), err.path()) {
|
||||
return Error::WithDepth {
|
||||
depth: depth,
|
||||
err: Box::new(Error::Loop {
|
||||
ancestor: anc.to_path_buf(),
|
||||
child: child.to_path_buf(),
|
||||
}),
|
||||
};
|
||||
}
|
||||
let path = err.path().map(|p| p.to_path_buf());
|
||||
let mut ig_err = Error::Io(io::Error::from(err));
|
||||
if let Some(path) = path {
|
||||
ig_err = Error::WithPath {
|
||||
path: path,
|
||||
err: Box::new(ig_err),
|
||||
};
|
||||
}
|
||||
ig_err
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct PartialErrorBuilder(Vec<Error>);
|
||||
|
||||
|
||||
@@ -124,7 +124,7 @@ impl OverrideBuilder {
|
||||
///
|
||||
/// Once a matcher is built, no new globs can be added to it.
|
||||
pub fn build(&self) -> Result<Override, Error> {
|
||||
Ok(Override(try!(self.builder.build())))
|
||||
Ok(Override(self.builder.build()?))
|
||||
}
|
||||
|
||||
/// Add a glob to the set of overrides.
|
||||
@@ -134,7 +134,17 @@ impl OverrideBuilder {
|
||||
/// namely, `!` at the beginning of a glob will ignore a file. Without `!`,
|
||||
/// all matches of the glob provided are treated as whitelist matches.
|
||||
pub fn add(&mut self, glob: &str) -> Result<&mut OverrideBuilder, Error> {
|
||||
try!(self.builder.add_line(None, glob));
|
||||
self.builder.add_line(None, glob)?;
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Toggle whether the globs should be matched case insensitively or not.
|
||||
///
|
||||
/// This is disabled by default.
|
||||
pub fn case_insensitive(
|
||||
&mut self, yes: bool
|
||||
) -> Result<&mut OverrideBuilder, Error> {
|
||||
self.builder.case_insensitive(yes)?;
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
@@ -192,8 +202,9 @@ mod tests {
|
||||
#[test]
|
||||
fn gitignore() {
|
||||
let ov = ov(&["/foo", "bar/*.rs", "baz/**"]);
|
||||
assert!(ov.matched("bar/lib.rs", false).is_whitelist());
|
||||
assert!(ov.matched("bar/wat/lib.rs", false).is_ignore());
|
||||
assert!(ov.matched("wat/bar/lib.rs", false).is_whitelist());
|
||||
assert!(ov.matched("wat/bar/lib.rs", false).is_ignore());
|
||||
assert!(ov.matched("foo", false).is_whitelist());
|
||||
assert!(ov.matched("wat/foo", false).is_ignore());
|
||||
assert!(ov.matched("baz", false).is_ignore());
|
||||
@@ -220,4 +231,27 @@ mod tests {
|
||||
let ov = ov(&["!/bar"]);
|
||||
assert!(ov.matched("./foo/bar", false).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn case_insensitive() {
|
||||
let ov = OverrideBuilder::new(ROOT)
|
||||
.case_insensitive(true).unwrap()
|
||||
.add("*.html").unwrap()
|
||||
.build().unwrap();
|
||||
assert!(ov.matched("foo.html", false).is_whitelist());
|
||||
assert!(ov.matched("foo.HTML", false).is_whitelist());
|
||||
assert!(ov.matched("foo.htm", false).is_ignore());
|
||||
assert!(ov.matched("foo.HTM", false).is_ignore());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn default_case_sensitive() {
|
||||
let ov = OverrideBuilder::new(ROOT)
|
||||
.add("*.html").unwrap()
|
||||
.build().unwrap();
|
||||
assert!(ov.matched("foo.html", false).is_whitelist());
|
||||
assert!(ov.matched("foo.HTML", false).is_ignore());
|
||||
assert!(ov.matched("foo.htm", false).is_ignore());
|
||||
assert!(ov.matched("foo.HTM", false).is_ignore());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -100,28 +100,36 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("agda", &["*.agda", "*.lagda"]),
|
||||
("asciidoc", &["*.adoc", "*.asc", "*.asciidoc"]),
|
||||
("asm", &["*.asm", "*.s", "*.S"]),
|
||||
("avro", &["*.avdl", "*.avpr", "*.avsc"]),
|
||||
("awk", &["*.awk"]),
|
||||
("bitbake", &["*.bb", "*.bbappend", "*.bbclass", "*.conf", "*.inc"]),
|
||||
("bzip2", &["*.bz2"]),
|
||||
("c", &["*.c", "*.h", "*.H"]),
|
||||
("cabal", &["*.cabal"]),
|
||||
("cbor", &["*.cbor"]),
|
||||
("ceylon", &["*.ceylon"]),
|
||||
("clojure", &["*.clj", "*.cljc", "*.cljs", "*.cljx"]),
|
||||
("cmake", &["*.cmake", "CMakeLists.txt"]),
|
||||
("coffeescript", &["*.coffee"]),
|
||||
("creole", &["*.creole"]),
|
||||
("config", &["*.config"]),
|
||||
("config", &["*.cfg", "*.conf", "*.config", "*.ini"]),
|
||||
("cpp", &[
|
||||
"*.C", "*.cc", "*.cpp", "*.cxx",
|
||||
"*.h", "*.H", "*.hh", "*.hpp",
|
||||
"*.h", "*.H", "*.hh", "*.hpp", "*.hxx", "*.inl",
|
||||
]),
|
||||
("crystal", &["Projectfile", "*.cr"]),
|
||||
("cs", &["*.cs"]),
|
||||
("csharp", &["*.cs"]),
|
||||
("cshtml", &["*.cshtml"]),
|
||||
("css", &["*.css", "*.scss"]),
|
||||
("csv", &["*.csv"]),
|
||||
("cython", &["*.pyx"]),
|
||||
("dart", &["*.dart"]),
|
||||
("d", &["*.d"]),
|
||||
("docker", &["*Dockerfile*"]),
|
||||
("elisp", &["*.el"]),
|
||||
("elixir", &["*.ex", "*.eex", "*.exs"]),
|
||||
("elm", &["*.elm"]),
|
||||
("erlang", &["*.erl", "*.hrl"]),
|
||||
("fish", &["*.fish"]),
|
||||
("fortran", &[
|
||||
@@ -129,32 +137,74 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
"*.f90", "*.F90", "*.f95", "*.F95",
|
||||
]),
|
||||
("fsharp", &["*.fs", "*.fsx", "*.fsi"]),
|
||||
("gn", &["*.gn", "*.gni"]),
|
||||
("go", &["*.go"]),
|
||||
("gzip", &["*.gz"]),
|
||||
("groovy", &["*.groovy", "*.gradle"]),
|
||||
("h", &["*.h", "*.hpp"]),
|
||||
("hbs", &["*.hbs"]),
|
||||
("haskell", &["*.hs", "*.lhs"]),
|
||||
("html", &["*.htm", "*.html", "*.ejs"]),
|
||||
("java", &["*.java"]),
|
||||
("jinja", &["*.jinja", "*.jinja2"]),
|
||||
("jinja", &["*.j2", "*.jinja", "*.jinja2"]),
|
||||
("js", &[
|
||||
"*.js", "*.jsx", "*.vue",
|
||||
]),
|
||||
("json", &["*.json"]),
|
||||
("json", &["*.json", "composer.lock"]),
|
||||
("jsonl", &["*.jsonl"]),
|
||||
("julia", &["*.jl"]),
|
||||
("jupyter", &["*.ipynb", "*.jpynb"]),
|
||||
("jl", &["*.jl"]),
|
||||
("kotlin", &["*.kt", "*.kts"]),
|
||||
("less", &["*.less"]),
|
||||
("license", &[
|
||||
// General
|
||||
"COPYING", "COPYING[.-]*",
|
||||
"COPYRIGHT", "COPYRIGHT[.-]*",
|
||||
"EULA", "EULA[.-]*",
|
||||
"licen[cs]e", "licen[cs]e.*",
|
||||
"LICEN[CS]E", "LICEN[CS]E[.-]*", "*[.-]LICEN[CS]E*",
|
||||
"NOTICE", "NOTICE[.-]*",
|
||||
"PATENTS", "PATENTS[.-]*",
|
||||
"UNLICEN[CS]E", "UNLICEN[CS]E[.-]*",
|
||||
// GPL (gpl.txt, etc.)
|
||||
"agpl[.-]*",
|
||||
"gpl[.-]*",
|
||||
"lgpl[.-]*",
|
||||
// Other license-specific (APACHE-2.0.txt, etc.)
|
||||
"AGPL-*[0-9]*",
|
||||
"APACHE-*[0-9]*",
|
||||
"BSD-*[0-9]*",
|
||||
"CC-BY-*",
|
||||
"GFDL-*[0-9]*",
|
||||
"GNU-*[0-9]*",
|
||||
"GPL-*[0-9]*",
|
||||
"LGPL-*[0-9]*",
|
||||
"MIT-*[0-9]*",
|
||||
"MPL-*[0-9]*",
|
||||
"OFL-*[0-9]*",
|
||||
]),
|
||||
("lisp", &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]),
|
||||
("log", &["*.log"]),
|
||||
("lua", &["*.lua"]),
|
||||
("lzma", &["*.lzma"]),
|
||||
("m4", &["*.ac", "*.m4"]),
|
||||
("make", &["gnumakefile", "Gnumakefile", "makefile", "Makefile", "*.mk", "*.mak"]),
|
||||
("make", &[
|
||||
"gnumakefile", "Gnumakefile", "GNUmakefile",
|
||||
"makefile", "Makefile",
|
||||
"*.mk", "*.mak"
|
||||
]),
|
||||
("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
|
||||
("md", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
|
||||
("man", &["*.[0-9lnpx]", "*.[0-9][cEFMmpSx]"]),
|
||||
("matlab", &["*.m"]),
|
||||
("mk", &["mkfile"]),
|
||||
("ml", &["*.ml"]),
|
||||
("msbuild", &[
|
||||
"*.csproj", "*.fsproj", "*.vcxproj", "*.proj", "*.props", "*.targets"
|
||||
]),
|
||||
("nim", &["*.nim"]),
|
||||
("nix", &["*.nix"]),
|
||||
("objc", &["*.h", "*.m"]),
|
||||
("objcpp", &["*.h", "*.mm"]),
|
||||
("ocaml", &["*.ml", "*.mli", "*.mll", "*.mly"]),
|
||||
@@ -163,8 +213,11 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("pdf", &["*.pdf"]),
|
||||
("php", &["*.php", "*.php3", "*.php4", "*.php5", "*.phtml"]),
|
||||
("pod", &["*.pod"]),
|
||||
("protobuf", &["*.proto"]),
|
||||
("ps", &["*.cdxml", "*.ps1", "*.ps1xml", "*.psd1", "*.psm1"]),
|
||||
("purs", &["*.purs"]),
|
||||
("py", &["*.py"]),
|
||||
("qmake", &["*.pro", "*.pri", "*.prf"]),
|
||||
("readme", &["README*", "*README"]),
|
||||
("r", &["*.R", "*.r", "*.Rmd", "*.Rnw"]),
|
||||
("rdoc", &["*.rdoc"]),
|
||||
@@ -173,30 +226,72 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("rust", &["*.rs"]),
|
||||
("sass", &["*.sass", "*.scss"]),
|
||||
("scala", &["*.scala"]),
|
||||
("sh", &["*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh"]),
|
||||
("sh", &[
|
||||
// Portable/misc. init files
|
||||
".login", ".logout", ".profile", "profile",
|
||||
// bash-specific init files
|
||||
".bash_login", "bash_login",
|
||||
".bash_logout", "bash_logout",
|
||||
".bash_profile", "bash_profile",
|
||||
".bashrc", "bashrc", "*.bashrc",
|
||||
// csh-specific init files
|
||||
".cshrc", "*.cshrc",
|
||||
// ksh-specific init files
|
||||
".kshrc", "*.kshrc",
|
||||
// tcsh-specific init files
|
||||
".tcshrc",
|
||||
// zsh-specific init files
|
||||
".zshenv", "zshenv",
|
||||
".zlogin", "zlogin",
|
||||
".zlogout", "zlogout",
|
||||
".zprofile", "zprofile",
|
||||
".zshrc", "zshrc",
|
||||
// Extensions
|
||||
"*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh", "*.zsh",
|
||||
]),
|
||||
("smarty", &["*.tpl"]),
|
||||
("sml", &["*.sml", "*.sig"]),
|
||||
("soy", &["*.soy"]),
|
||||
("spark", &["*.spark"]),
|
||||
("sql", &["*.sql", "*.psql"]),
|
||||
("stylus", &["*.styl"]),
|
||||
("sql", &["*.sql"]),
|
||||
("sv", &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]),
|
||||
("svg", &["*.svg"]),
|
||||
("swift", &["*.swift"]),
|
||||
("swig", &["*.def", "*.i"]),
|
||||
("systemd", &[
|
||||
"*.automount", "*.conf", "*.device", "*.link", "*.mount", "*.path",
|
||||
"*.scope", "*.service", "*.slice", "*.socket", "*.swap", "*.target",
|
||||
"*.timer",
|
||||
]),
|
||||
("taskpaper", &["*.taskpaper"]),
|
||||
("tcl", &["*.tcl"]),
|
||||
("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib"]),
|
||||
("textile", &["*.textile"]),
|
||||
("tf", &["*.tf"]),
|
||||
("ts", &["*.ts", "*.tsx"]),
|
||||
("txt", &["*.txt"]),
|
||||
("toml", &["*.toml", "Cargo.lock"]),
|
||||
("twig", &["*.twig"]),
|
||||
("vala", &["*.vala"]),
|
||||
("vb", &["*.vb"]),
|
||||
("vhdl", &["*.vhd", "*.vhdl"]),
|
||||
("vim", &["*.vim"]),
|
||||
("vimscript", &["*.vim"]),
|
||||
("wiki", &["*.mediawiki", "*.wiki"]),
|
||||
("xml", &["*.xml"]),
|
||||
("webidl", &["*.idl", "*.webidl", "*.widl"]),
|
||||
("xml", &["*.xml", "*.xml.dist"]),
|
||||
("xz", &["*.xz"]),
|
||||
("yacc", &["*.y"]),
|
||||
("yaml", &["*.yaml", "*.yml"]),
|
||||
("zsh", &["zshenv", ".zshenv", "zprofile", ".zprofile", "zshrc", ".zshrc", "zlogin", ".zlogin", "zlogout", ".zlogout", "*.zsh"]),
|
||||
("zsh", &[
|
||||
".zshenv", "zshenv",
|
||||
".zlogin", "zlogin",
|
||||
".zlogout", "zlogout",
|
||||
".zprofile", "zprofile",
|
||||
".zshrc", "zshrc",
|
||||
"*.zsh",
|
||||
]),
|
||||
];
|
||||
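For orientation, a hedged sketch of how these definitions are typically consumed; `add_defaults`, `select`, and `matched` are assumed from the crate's public `TypesBuilder`/`Types` API rather than shown in this hunk:

extern crate ignore;

use std::path::Path;
use ignore::types::TypesBuilder;

fn main() {
    let mut builder = TypesBuilder::new();
    // Load the DEFAULT_TYPES table above, then restrict matching to one type.
    builder.add_defaults();
    builder.select("rust");
    let types = builder.build().unwrap();
    // Files of a selected type are whitelist matches; with a selection in
    // place, other files are expected to be ignored.
    assert!(types.matched(Path::new("src/main.rs"), false).is_whitelist());
}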
|
||||
/// Glob represents a single glob in a set of file type definitions.
|
||||
@@ -442,18 +537,23 @@ impl TypesBuilder {
|
||||
}
|
||||
};
|
||||
for (iglob, glob) in def.globs.iter().enumerate() {
|
||||
build_set.add(try!(
|
||||
build_set.add(
|
||||
GlobBuilder::new(glob)
|
||||
.literal_separator(true)
|
||||
.build()
|
||||
.map_err(|err| Error::Glob(err.to_string()))));
|
||||
.map_err(|err| {
|
||||
Error::Glob {
|
||||
glob: Some(glob.to_string()),
|
||||
err: err.kind().to_string(),
|
||||
}
|
||||
})?);
|
||||
glob_to_selection.push((isel, iglob));
|
||||
}
|
||||
selections.push(selection.clone().map(move |_| def));
|
||||
}
|
||||
let set = try!(build_set.build().map_err(|err| {
|
||||
Error::Glob(err.to_string())
|
||||
}));
|
||||
let set = build_set.build().map_err(|err| {
|
||||
Error::Glob { glob: None, err: err.to_string() }
|
||||
})?;
|
||||
Ok(Types {
|
||||
defs: defs,
|
||||
selections: selections,
|
||||
@@ -566,7 +666,7 @@ impl TypesBuilder {
|
||||
for type_name in types {
|
||||
let globs = self.types.get(type_name).unwrap().globs.clone();
|
||||
for glob in globs {
|
||||
try!(self.add(name, &glob));
|
||||
self.add(name, &glob)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use std::cmp;
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::ffi::OsStr;
|
||||
use std::fmt;
|
||||
use std::fs::{self, FileType, Metadata};
|
||||
use std::io;
|
||||
@@ -11,7 +11,8 @@ use std::time::Duration;
|
||||
use std::vec;
|
||||
|
||||
use crossbeam::sync::MsQueue;
|
||||
use walkdir::{self, WalkDir, WalkDirIterator, is_same_file};
|
||||
use same_file::Handle;
|
||||
use walkdir::{self, WalkDir};
|
||||
|
||||
use dir::{Ignore, IgnoreBuilder};
|
||||
use gitignore::GitignoreBuilder;
|
||||
@@ -23,7 +24,7 @@ use {Error, PartialErrorBuilder};
|
||||
///
|
||||
/// The error typically refers to a problem parsing ignore files in a
|
||||
/// particular directory.
|
||||
#[derive(Debug)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct DirEntry {
|
||||
dent: DirEntryInner,
|
||||
err: Option<Error>,
|
||||
@@ -36,8 +37,8 @@ impl DirEntry {
|
||||
}
|
||||
|
||||
/// Whether this entry corresponds to a symbolic link or not.
|
||||
pub fn path_is_symbolic_link(&self) -> bool {
|
||||
self.dent.path_is_symbolic_link()
|
||||
pub fn path_is_symlink(&self) -> bool {
|
||||
self.dent.path_is_symlink()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry corresponds to stdin.
|
||||
@@ -88,6 +89,11 @@ impl DirEntry {
|
||||
self.err.as_ref()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry points to a directory.
|
||||
fn is_dir(&self) -> bool {
|
||||
self.dent.is_dir()
|
||||
}
|
||||
|
||||
fn new_stdin() -> DirEntry {
|
||||
DirEntry {
|
||||
dent: DirEntryInner::Stdin,
|
||||
@@ -120,7 +126,7 @@ impl DirEntry {
|
||||
///
|
||||
/// Specifically, (3) has to essentially re-create the DirEntry implementation
|
||||
/// from WalkDir.
|
||||
#[derive(Debug)]
|
||||
#[derive(Clone, Debug)]
|
||||
enum DirEntryInner {
|
||||
Stdin,
|
||||
Walkdir(walkdir::DirEntry),
|
||||
@@ -137,12 +143,12 @@ impl DirEntryInner {
|
||||
}
|
||||
}
|
||||
|
||||
fn path_is_symbolic_link(&self) -> bool {
|
||||
fn path_is_symlink(&self) -> bool {
|
||||
use self::DirEntryInner::*;
|
||||
match *self {
|
||||
Stdin => false,
|
||||
Walkdir(ref x) => x.path_is_symbolic_link(),
|
||||
Raw(ref x) => x.path_is_symbolic_link(),
|
||||
Walkdir(ref x) => x.path_is_symlink(),
|
||||
Raw(ref x) => x.path_is_symlink(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -199,6 +205,7 @@ impl DirEntryInner {
|
||||
|
||||
#[cfg(unix)]
|
||||
fn ino(&self) -> Option<u64> {
|
||||
use walkdir::DirEntryExt;
|
||||
use self::DirEntryInner::*;
|
||||
match *self {
|
||||
Stdin => None,
|
||||
@@ -206,10 +213,29 @@ impl DirEntryInner {
|
||||
Raw(ref x) => Some(x.ino()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn is_dir(&self) -> bool {
|
||||
self.metadata().map(|md| metadata_is_dir(&md)).unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(not(windows))]
|
||||
fn is_dir(&self) -> bool {
|
||||
self.file_type().map(|ft| ft.is_dir()).unwrap_or(false)
|
||||
}
|
||||
}
|
||||
|
||||
/// DirEntryRaw is essentially copied from the walkdir crate so that we can
|
||||
/// build `DirEntry`s from whole cloth in the parallel iterator.
|
||||
#[derive(Clone)]
|
||||
struct DirEntryRaw {
|
||||
/// The path as reported by the `fs::ReadDir` iterator (even if it's a
|
||||
/// symbolic link).
|
||||
@@ -224,6 +250,14 @@ struct DirEntryRaw {
|
||||
/// The underlying inode number (Unix only).
|
||||
#[cfg(unix)]
|
||||
ino: u64,
|
||||
/// The underlying metadata (Windows only). We store this on Windows
|
||||
/// because this comes for free while reading a directory.
|
||||
///
|
||||
/// We use this to determine whether an entry is a directory or not, which
|
||||
/// works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
metadata: fs::Metadata,
|
||||
}
|
||||
|
||||
impl fmt::Debug for DirEntryRaw {
|
||||
@@ -244,11 +278,25 @@ impl DirEntryRaw {
|
||||
&self.path
|
||||
}
|
||||
|
||||
fn path_is_symbolic_link(&self) -> bool {
|
||||
fn path_is_symlink(&self) -> bool {
|
||||
self.ty.is_symlink() || self.follow_link
|
||||
}
|
||||
|
||||
fn metadata(&self) -> Result<Metadata, Error> {
|
||||
self.metadata_internal()
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn metadata_internal(&self) -> Result<fs::Metadata, Error> {
|
||||
if self.follow_link {
|
||||
fs::metadata(&self.path)
|
||||
} else {
|
||||
Ok(self.metadata.clone())
|
||||
}.map_err(|err| Error::Io(io::Error::from(err)).with_path(&self.path))
|
||||
}
|
||||
|
||||
#[cfg(not(windows))]
|
||||
fn metadata_internal(&self) -> Result<fs::Metadata, Error> {
|
||||
if self.follow_link {
|
||||
fs::metadata(&self.path)
|
||||
} else {
|
||||
@@ -277,28 +325,36 @@ impl DirEntryRaw {
|
||||
depth: usize,
|
||||
ent: &fs::DirEntry,
|
||||
) -> Result<DirEntryRaw, Error> {
|
||||
let ty = try!(ent.file_type().map_err(|err| {
|
||||
let ty = ent.file_type().map_err(|err| {
|
||||
let err = Error::Io(io::Error::from(err)).with_path(ent.path());
|
||||
Error::WithDepth {
|
||||
depth: depth,
|
||||
err: Box::new(err),
|
||||
}
|
||||
}));
|
||||
Ok(DirEntryRaw::from_entry_os(depth, ent, ty))
|
||||
})?;
|
||||
DirEntryRaw::from_entry_os(depth, ent, ty)
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
#[cfg(windows)]
|
||||
fn from_entry_os(
|
||||
depth: usize,
|
||||
ent: &fs::DirEntry,
|
||||
ty: fs::FileType,
|
||||
) -> DirEntryRaw {
|
||||
DirEntryRaw {
|
||||
) -> Result<DirEntryRaw, Error> {
|
||||
let md = ent.metadata().map_err(|err| {
|
||||
let err = Error::Io(io::Error::from(err)).with_path(ent.path());
|
||||
Error::WithDepth {
|
||||
depth: depth,
|
||||
err: Box::new(err),
|
||||
}
|
||||
})?;
|
||||
Ok(DirEntryRaw {
|
||||
path: ent.path(),
|
||||
ty: ty,
|
||||
follow_link: false,
|
||||
depth: depth,
|
||||
}
|
||||
metadata: md,
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
@@ -306,28 +362,29 @@ impl DirEntryRaw {
|
||||
depth: usize,
|
||||
ent: &fs::DirEntry,
|
||||
ty: fs::FileType,
|
||||
) -> DirEntryRaw {
|
||||
) -> Result<DirEntryRaw, Error> {
|
||||
use std::os::unix::fs::DirEntryExt;
|
||||
|
||||
DirEntryRaw {
|
||||
Ok(DirEntryRaw {
|
||||
path: ent.path(),
|
||||
ty: ty,
|
||||
follow_link: false,
|
||||
depth: depth,
|
||||
ino: ent.ino(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn from_link(depth: usize, pb: PathBuf) -> Result<DirEntryRaw, Error> {
|
||||
let md = try!(fs::metadata(&pb).map_err(|err| {
|
||||
let md = fs::metadata(&pb).map_err(|err| {
|
||||
Error::Io(err).with_path(&pb)
|
||||
}));
|
||||
})?;
|
||||
Ok(DirEntryRaw {
|
||||
path: pb,
|
||||
ty: md.file_type(),
|
||||
follow_link: true,
|
||||
depth: depth,
|
||||
metadata: md,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -335,9 +392,9 @@ impl DirEntryRaw {
|
||||
fn from_link(depth: usize, pb: PathBuf) -> Result<DirEntryRaw, Error> {
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
|
||||
let md = try!(fs::metadata(&pb).map_err(|err| {
|
||||
let md = fs::metadata(&pb).map_err(|err| {
|
||||
Error::Io(err).with_path(&pb)
|
||||
}));
|
||||
})?;
|
||||
Ok(DirEntryRaw {
|
||||
path: pb,
|
||||
ty: md.file_type(),
|
||||
@@ -380,16 +437,16 @@ impl DirEntryRaw {
|
||||
/// is: `.ignore`, `.gitignore`, `.git/info/exclude`, global gitignore and
|
||||
/// finally explicitly added ignore files. Note that precedence between
|
||||
/// different types of ignore files is not impacted by the directory hierarchy;
|
||||
/// any `.ignore` file overrides all `.gitignore` files. Within each
|
||||
/// precedence level, more nested ignore files have a higher precedence over
|
||||
/// less nested ignore files.
|
||||
/// * Third, if the previous step yields an ignore match, than all matching
|
||||
/// is stopped and the path is skipped.. If it yields a whitelist match, then
|
||||
/// process continues. A whitelist match can be overridden by a later matcher.
|
||||
/// any `.ignore` file overrides all `.gitignore` files. Within each precedence
|
||||
/// level, more nested ignore files have a higher precedence than less nested
|
||||
/// ignore files.
|
||||
/// * Third, if the previous step yields an ignore match, then all matching
|
||||
/// is stopped and the path is skipped. If it yields a whitelist match, then
|
||||
/// matching continues. A whitelist match can be overridden by a later matcher.
|
||||
/// * Fourth, unless the path is a directory, the file type matcher is run on
|
||||
/// the path. As above, if it's an ignore match, then all matching is stopped
|
||||
/// and the path is skipped. If it's a whitelist match, then matching
|
||||
/// continues.
|
||||
/// the path. As above, if it yields an ignore match, then all matching is
|
||||
/// stopped and the path is skipped. If it yields a whitelist match, then
|
||||
/// matching continues.
|
||||
/// * Fifth, if the path hasn't been whitelisted and it is hidden, then the
|
||||
/// path is skipped.
|
||||
/// * Sixth, unless the path is a directory, the size of the file is compared
|
||||
@@ -404,7 +461,9 @@ pub struct WalkBuilder {
|
||||
max_depth: Option<usize>,
|
||||
max_filesize: Option<u64>,
|
||||
follow_links: bool,
|
||||
sorter: Option<Arc<Fn(&OsString, &OsString) -> cmp::Ordering + 'static>>,
|
||||
sorter: Option<Arc<
|
||||
Fn(&OsStr, &OsStr) -> cmp::Ordering + Send + Sync + 'static
|
||||
>>,
|
||||
threads: usize,
|
||||
}
|
||||
|
||||
@@ -452,13 +511,15 @@ impl WalkBuilder {
|
||||
(p.to_path_buf(), None)
|
||||
} else {
|
||||
let mut wd = WalkDir::new(p);
|
||||
wd = wd.follow_links(follow_links || p.is_file());
|
||||
wd = wd.follow_links(follow_links || path_is_file(p));
|
||||
if let Some(max_depth) = max_depth {
|
||||
wd = wd.max_depth(max_depth);
|
||||
}
|
||||
if let Some(ref cmp) = cmp {
|
||||
let cmp = cmp.clone();
|
||||
wd = wd.sort_by(move |a, b| cmp(a, b));
|
||||
wd = wd.sort_by(move |a, b| {
|
||||
cmp(a.file_name(), b.file_name())
|
||||
});
|
||||
}
|
||||
(p.to_path_buf(), Some(WalkEventIter::from(wd)))
|
||||
}
|
||||
@@ -532,7 +593,7 @@ impl WalkBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Add an ignore file to the matcher.
|
||||
/// Add a global ignore file to the matcher.
|
||||
///
|
||||
/// This has lower precedence than all other sources of ignore rules.
|
||||
///
|
||||
@@ -551,6 +612,20 @@ impl WalkBuilder {
|
||||
errs.into_error_option()
|
||||
}
|
||||
|
||||
/// Add a custom ignore file name
|
||||
///
|
||||
/// These ignore files have higher precedence than all other ignore files.
|
||||
///
|
||||
/// When specifying multiple names, earlier names have lower precedence than
|
||||
/// later names.
|
||||
pub fn add_custom_ignore_filename<S: AsRef<OsStr>>(
|
||||
&mut self,
|
||||
file_name: S
|
||||
) -> &mut WalkBuilder {
|
||||
self.ig_builder.add_custom_ignore_filename(file_name);
|
||||
self
|
||||
}
|
||||
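As wired up later in this change set (ripgrep registers `.rgignore` through this method), a minimal sketch of the call site; the directory is a placeholder:

extern crate ignore;

use ignore::WalkBuilder;

fn main() {
    let mut builder = WalkBuilder::new("./");
    // Highest-precedence, tool-specific ignore file.
    builder.add_custom_ignore_filename(".rgignore");
    for result in builder.build() {
        match result {
            Ok(entry) => println!("{}", entry.path().display()),
            Err(err) => eprintln!("ERROR: {}", err),
        }
    }
}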
|
||||
/// Add an override matcher.
|
||||
///
|
||||
/// By default, no override matcher is used.
|
||||
@@ -571,6 +646,29 @@ impl WalkBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Enables all the standard ignore filters.
|
||||
///
|
||||
/// This toggles, as a group, all the filters that are enabled by default:
|
||||
///
|
||||
/// - [hidden()](#method.hidden)
|
||||
/// - [parents()](#method.parents)
|
||||
/// - [ignore()](#method.ignore)
|
||||
/// - [git_ignore()](#method.git_ignore)
|
||||
/// - [git_global()](#method.git_global)
|
||||
/// - [git_exclude()](#method.git_exclude)
|
||||
///
|
||||
/// They may still be toggled individually after calling this function.
|
||||
///
|
||||
/// This is (by definition) enabled by default.
|
||||
pub fn standard_filters(&mut self, yes: bool) -> &mut WalkBuilder {
|
||||
self.hidden(yes)
|
||||
.parents(yes)
|
||||
.ignore(yes)
|
||||
.git_ignore(yes)
|
||||
.git_global(yes)
|
||||
.git_exclude(yes)
|
||||
}
|
||||
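A small sketch of the toggle-as-a-group behaviour described above: disable everything, then re-enable only `.gitignore` handling (the directory is a placeholder):

extern crate ignore;

use ignore::WalkBuilder;

fn main() {
    let mut builder = WalkBuilder::new("./");
    builder.standard_filters(false).git_ignore(true);
    let entries = builder.build().filter_map(|result| result.ok()).count();
    println!("{} entries", entries);
}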
|
||||
/// Enables ignoring hidden files.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
@@ -610,6 +708,8 @@ impl WalkBuilder {
|
||||
/// does not exist or does not specify `core.excludesFile`, then
|
||||
/// `$XDG_CONFIG_HOME/git/ignore` is read. If `$XDG_CONFIG_HOME` is not
|
||||
/// set or is empty, then `$HOME/.config/git/ignore` is used instead.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn git_global(&mut self, yes: bool) -> &mut WalkBuilder {
|
||||
self.ig_builder.git_global(yes);
|
||||
self
|
||||
@@ -637,7 +737,7 @@ impl WalkBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Set a function for sorting directory entries.
|
||||
/// Set a function for sorting directory entries by file name.
|
||||
///
|
||||
/// If a compare function is set, the resulting iterator will return all
|
||||
/// paths in sorted order. The compare function will be called to compare
|
||||
@@ -645,8 +745,9 @@ impl WalkBuilder {
|
||||
/// entry.
|
||||
///
|
||||
/// Note that this is not used in the parallel iterator.
|
||||
pub fn sort_by<F>(&mut self, cmp: F) -> &mut WalkBuilder
|
||||
where F: Fn(&OsString, &OsString) -> cmp::Ordering + 'static {
|
||||
pub fn sort_by_file_name<F>(&mut self, cmp: F) -> &mut WalkBuilder
|
||||
where F: Fn(&OsStr, &OsStr) -> cmp::Ordering + Send + Sync + 'static
|
||||
{
|
||||
self.sorter = Some(Arc::new(cmp));
|
||||
self
|
||||
}
|
||||
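This is the method ripgrep's `--sort-files` flag uses later in this change set; a minimal sketch (the comparator receives file names as `&OsStr`):

extern crate ignore;

use ignore::WalkBuilder;

fn main() {
    let mut builder = WalkBuilder::new("./");
    builder.sort_by_file_name(|a, b| a.cmp(b));
    for entry in builder.build().filter_map(|result| result.ok()) {
        println!("{}", entry.path().display());
    }
}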
@@ -682,7 +783,7 @@ impl Walk {
|
||||
return false;
|
||||
}
|
||||
|
||||
let is_dir = ent.file_type().is_dir();
|
||||
let is_dir = walkdir_entry_is_dir(ent);
|
||||
let max_size = self.max_filesize;
|
||||
let should_skip_path = skip_path(&self.ig, ent.path(), is_dir);
|
||||
let should_skip_filesize = if !is_dir && max_size.is_some() {
|
||||
@@ -711,7 +812,7 @@ impl Iterator for Walk {
|
||||
}
|
||||
Some((path, Some(it))) => {
|
||||
self.it = Some(it);
|
||||
if self.parents && path.is_dir() {
|
||||
if self.parents && path_is_dir(&path) {
|
||||
let (ig, err) = self.ig_root.add_parents(path);
|
||||
self.ig = ig;
|
||||
if let Some(err) = err {
|
||||
@@ -727,7 +828,7 @@ impl Iterator for Walk {
|
||||
};
|
||||
match ev {
|
||||
Err(err) => {
|
||||
return Some(Err(Error::from(err)));
|
||||
return Some(Err(Error::from_walkdir(err)));
|
||||
}
|
||||
Ok(WalkEvent::Exit) => {
|
||||
self.ig = self.ig.parent().unwrap();
|
||||
@@ -763,7 +864,7 @@ impl Iterator for Walk {
|
||||
/// the entire contents of a directory have been enumerated.
|
||||
struct WalkEventIter {
|
||||
depth: usize,
|
||||
it: walkdir::Iter,
|
||||
it: walkdir::IntoIter,
|
||||
next: Option<Result<walkdir::DirEntry, walkdir::Error>>,
|
||||
}
|
||||
|
||||
@@ -801,7 +902,7 @@ impl Iterator for WalkEventIter {
|
||||
None => None,
|
||||
Some(Err(err)) => Some(Err(err)),
|
||||
Some(Ok(dent)) => {
|
||||
if dent.file_type().is_dir() {
|
||||
if walkdir_entry_is_dir(&dent) {
|
||||
self.depth += 1;
|
||||
Some(Ok(WalkEvent::Dir(dent)))
|
||||
} else {
|
||||
@@ -954,7 +1055,12 @@ struct Work {
|
||||
impl Work {
|
||||
/// Returns true if and only if this work item is a directory.
|
||||
fn is_dir(&self) -> bool {
|
||||
self.dent.file_type().map_or(false, |t| t.is_dir())
|
||||
self.dent.is_dir()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this work item is a symlink.
|
||||
fn is_symlink(&self) -> bool {
|
||||
self.dent.file_type().map_or(false, |ft| ft.is_symlink())
|
||||
}
|
||||
|
||||
/// Adds ignore rules for parent directories.
|
||||
@@ -1043,7 +1149,7 @@ impl Worker {
|
||||
while let Some(mut work) = self.get_work() {
|
||||
// If the work is not a directory, then we can just execute the
|
||||
// caller's callback immediately and move on.
|
||||
if !work.is_dir() {
|
||||
if work.is_symlink() || !work.is_dir() {
|
||||
if (self.f)(Ok(work.dent)).is_quit() {
|
||||
self.quit_now();
|
||||
return;
|
||||
@@ -1128,13 +1234,13 @@ impl Worker {
|
||||
return (self.f)(Err(err));
|
||||
}
|
||||
};
|
||||
if dent.file_type().map_or(false, |ft| ft.is_dir()) {
|
||||
if dent.is_dir() {
|
||||
if let Err(err) = check_symlink_loop(ig, dent.path(), depth) {
|
||||
return (self.f)(Err(err));
|
||||
}
|
||||
}
|
||||
}
|
||||
let is_dir = dent.file_type().map_or(false, |ft| ft.is_dir());
|
||||
let is_dir = dent.is_dir();
|
||||
let max_size = self.max_filesize;
|
||||
let should_skip_path = skip_path(ig, dent.path(), is_dir);
|
||||
let should_skip_filesize = if !is_dir && max_size.is_some() {
|
||||
@@ -1276,11 +1382,14 @@ fn check_symlink_loop(
|
||||
child_path: &Path,
|
||||
child_depth: usize,
|
||||
) -> Result<(), Error> {
|
||||
for ig in ig_parent.parents().take_while(|ig| !ig.is_absolute_parent()) {
|
||||
let same = try!(is_same_file(ig.path(), child_path).map_err(|err| {
|
||||
let hchild = Handle::from_path(child_path).map_err(|err| {
|
||||
Error::from(err).with_path(child_path).with_depth(child_depth)
|
||||
}));
|
||||
if same {
|
||||
})?;
|
||||
for ig in ig_parent.parents().take_while(|ig| !ig.is_absolute_parent()) {
|
||||
let h = Handle::from_path(ig.path()).map_err(|err| {
|
||||
Error::from(err).with_path(child_path).with_depth(child_depth)
|
||||
})?;
|
||||
if hchild == h {
|
||||
return Err(Error::Loop {
|
||||
ancestor: ig.path().to_path_buf(),
|
||||
child: child_path.to_path_buf(),
|
||||
@@ -1327,6 +1436,62 @@ fn skip_path(ig: &Ignore, path: &Path, is_dir: bool) -> bool {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if this path points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn path_is_dir(path: &Path) -> bool {
|
||||
fs::metadata(path).map(|md| metadata_is_dir(&md)).unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry points to a directory.
|
||||
#[cfg(not(windows))]
|
||||
fn path_is_dir(path: &Path) -> bool {
|
||||
path.is_dir()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this path points to a file.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn path_is_file(path: &Path) -> bool {
|
||||
!path_is_dir(path)
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry points to a directory.
|
||||
#[cfg(not(windows))]
|
||||
fn path_is_file(path: &Path) -> bool {
|
||||
path.is_file()
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given walkdir entry points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn walkdir_entry_is_dir(dent: &walkdir::DirEntry) -> bool {
|
||||
dent.metadata().map(|md| metadata_is_dir(&md)).unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given walkdir entry points to a directory.
|
||||
#[cfg(not(windows))]
|
||||
fn walkdir_entry_is_dir(dent: &walkdir::DirEntry) -> bool {
|
||||
dent.file_type().is_dir()
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given metadata points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn metadata_is_dir(md: &fs::Metadata) -> bool {
|
||||
use std::os::windows::fs::MetadataExt;
|
||||
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
|
||||
md.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::fs::{self, File};
|
||||
@@ -1441,6 +1606,42 @@ mod tests {
|
||||
]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn custom_ignore() {
|
||||
let td = TempDir::new("walk-test-").unwrap();
|
||||
let custom_ignore = ".customignore";
|
||||
mkdirp(td.path().join("a"));
|
||||
wfile(td.path().join(custom_ignore), "foo");
|
||||
wfile(td.path().join("foo"), "");
|
||||
wfile(td.path().join("a/foo"), "");
|
||||
wfile(td.path().join("bar"), "");
|
||||
wfile(td.path().join("a/bar"), "");
|
||||
|
||||
let mut builder = WalkBuilder::new(td.path());
|
||||
builder.add_custom_ignore_filename(&custom_ignore);
|
||||
assert_paths(td.path(), &builder, &["bar", "a", "a/bar"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn custom_ignore_exclusive_use() {
|
||||
let td = TempDir::new("walk-test-").unwrap();
|
||||
let custom_ignore = ".customignore";
|
||||
mkdirp(td.path().join("a"));
|
||||
wfile(td.path().join(custom_ignore), "foo");
|
||||
wfile(td.path().join("foo"), "");
|
||||
wfile(td.path().join("a/foo"), "");
|
||||
wfile(td.path().join("bar"), "");
|
||||
wfile(td.path().join("a/bar"), "");
|
||||
|
||||
let mut builder = WalkBuilder::new(td.path());
|
||||
builder.ignore(false);
|
||||
builder.git_ignore(false);
|
||||
builder.git_global(false);
|
||||
builder.git_exclude(false);
|
||||
builder.add_custom_ignore_filename(&custom_ignore);
|
||||
assert_paths(td.path(), &builder, &["bar", "a", "a/bar"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn gitignore() {
|
||||
let td = TempDir::new("walk-test-").unwrap();
|
||||
|
||||
@@ -0,0 +1,216 @@
|
||||
# Based on https://github.com/behnam/gitignore-test/blob/master/.gitignore
|
||||
|
||||
### file in root
|
||||
|
||||
# MATCH /file_root_00
|
||||
file_root_00
|
||||
|
||||
# NO_MATCH
|
||||
file_root_01/
|
||||
|
||||
# NO_MATCH
|
||||
file_root_02/*
|
||||
|
||||
# NO_MATCH
|
||||
file_root_03/**
|
||||
|
||||
|
||||
# MATCH /file_root_10
|
||||
/file_root_10
|
||||
|
||||
# NO_MATCH
|
||||
/file_root_11/
|
||||
|
||||
# NO_MATCH
|
||||
/file_root_12/*
|
||||
|
||||
# NO_MATCH
|
||||
/file_root_13/**
|
||||
|
||||
|
||||
# NO_MATCH
|
||||
*/file_root_20
|
||||
|
||||
# NO_MATCH
|
||||
*/file_root_21/
|
||||
|
||||
# NO_MATCH
|
||||
*/file_root_22/*
|
||||
|
||||
# NO_MATCH
|
||||
*/file_root_23/**
|
||||
|
||||
|
||||
# MATCH /file_root_30
|
||||
**/file_root_30
|
||||
|
||||
# NO_MATCH
|
||||
**/file_root_31/
|
||||
|
||||
# NO_MATCH
|
||||
**/file_root_32/*
|
||||
|
||||
# NO_MATCH
|
||||
**/file_root_33/**
|
||||
|
||||
|
||||
### file in sub-dir
|
||||
|
||||
# MATCH /parent_dir/file_deep_00
|
||||
file_deep_00
|
||||
|
||||
# NO_MATCH
|
||||
file_deep_01/
|
||||
|
||||
# NO_MATCH
|
||||
file_deep_02/*
|
||||
|
||||
# NO_MATCH
|
||||
file_deep_03/**
|
||||
|
||||
|
||||
# NO_MATCH
|
||||
/file_deep_10
|
||||
|
||||
# NO_MATCH
|
||||
/file_deep_11/
|
||||
|
||||
# NO_MATCH
|
||||
/file_deep_12/*
|
||||
|
||||
# NO_MATCH
|
||||
/file_deep_13/**
|
||||
|
||||
|
||||
# MATCH /parent_dir/file_deep_20
|
||||
*/file_deep_20
|
||||
|
||||
# NO_MATCH
|
||||
*/file_deep_21/
|
||||
|
||||
# NO_MATCH
|
||||
*/file_deep_22/*
|
||||
|
||||
# NO_MATCH
|
||||
*/file_deep_23/**
|
||||
|
||||
|
||||
# MATCH /parent_dir/file_deep_30
|
||||
**/file_deep_30
|
||||
|
||||
# NO_MATCH
|
||||
**/file_deep_31/
|
||||
|
||||
# NO_MATCH
|
||||
**/file_deep_32/*
|
||||
|
||||
# NO_MATCH
|
||||
**/file_deep_33/**
|
||||
|
||||
|
||||
### dir in root
|
||||
|
||||
# MATCH /dir_root_00
|
||||
dir_root_00
|
||||
|
||||
# MATCH /dir_root_01
|
||||
dir_root_01/
|
||||
|
||||
# MATCH /dir_root_02
|
||||
dir_root_02/*
|
||||
|
||||
# MATCH /dir_root_03
|
||||
dir_root_03/**
|
||||
|
||||
|
||||
# MATCH /dir_root_10
|
||||
/dir_root_10
|
||||
|
||||
# MATCH /dir_root_11
|
||||
/dir_root_11/
|
||||
|
||||
# MATCH /dir_root_12
|
||||
/dir_root_12/*
|
||||
|
||||
# MATCH /dir_root_13
|
||||
/dir_root_13/**
|
||||
|
||||
|
||||
# NO_MATCH
|
||||
*/dir_root_20
|
||||
|
||||
# NO_MATCH
|
||||
*/dir_root_21/
|
||||
|
||||
# NO_MATCH
|
||||
*/dir_root_22/*
|
||||
|
||||
# NO_MATCH
|
||||
*/dir_root_23/**
|
||||
|
||||
|
||||
# MATCH /dir_root_30
|
||||
**/dir_root_30
|
||||
|
||||
# MATCH /dir_root_31
|
||||
**/dir_root_31/
|
||||
|
||||
# MATCH /dir_root_32
|
||||
**/dir_root_32/*
|
||||
|
||||
# MATCH /dir_root_33
|
||||
**/dir_root_33/**
|
||||
|
||||
|
||||
### dir in sub-dir
|
||||
|
||||
# MATCH /parent_dir/dir_deep_00
|
||||
dir_deep_00
|
||||
|
||||
# MATCH /parent_dir/dir_deep_01
|
||||
dir_deep_01/
|
||||
|
||||
# NO_MATCH
|
||||
dir_deep_02/*
|
||||
|
||||
# NO_MATCH
|
||||
dir_deep_03/**
|
||||
|
||||
|
||||
# NO_MATCH
|
||||
/dir_deep_10
|
||||
|
||||
# NO_MATCH
|
||||
/dir_deep_11/
|
||||
|
||||
# NO_MATCH
|
||||
/dir_deep_12/*
|
||||
|
||||
# NO_MATCH
|
||||
/dir_deep_13/**
|
||||
|
||||
|
||||
# MATCH /parent_dir/dir_deep_20
|
||||
*/dir_deep_20
|
||||
|
||||
# MATCH /parent_dir/dir_deep_21
|
||||
*/dir_deep_21/
|
||||
|
||||
# MATCH /parent_dir/dir_deep_22
|
||||
*/dir_deep_22/*
|
||||
|
||||
# MATCH /parent_dir/dir_deep_23
|
||||
*/dir_deep_23/**
|
||||
|
||||
|
||||
# MATCH /parent_dir/dir_deep_30
|
||||
**/dir_deep_30
|
||||
|
||||
# MATCH /parent_dir/dir_deep_31
|
||||
**/dir_deep_31/
|
||||
|
||||
# MATCH /parent_dir/dir_deep_32
|
||||
**/dir_deep_32/*
|
||||
|
||||
# MATCH /parent_dir/dir_deep_33
|
||||
**/dir_deep_33/**
|
||||
ignore/tests/gitignore_matched_path_or_any_parents_tests.rs (new file, 297 lines)
@@ -0,0 +1,297 @@
|
||||
extern crate ignore;
|
||||
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use ignore::gitignore::{Gitignore, GitignoreBuilder};
|
||||
|
||||
|
||||
const IGNORE_FILE: &'static str = "tests/gitignore_matched_path_or_any_parents_tests.gitignore";
|
||||
|
||||
|
||||
fn get_gitignore() -> Gitignore {
|
||||
let mut builder = GitignoreBuilder::new("ROOT");
|
||||
let error = builder.add(IGNORE_FILE);
|
||||
assert!(error.is_none(), "failed to open gitignore file");
|
||||
builder.build().unwrap()
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "path is expect to be under the root")]
|
||||
fn test_path_should_be_under_root() {
|
||||
let gitignore = get_gitignore();
|
||||
let path = "/tmp/some_file";
|
||||
gitignore.matched_path_or_any_parents(Path::new(path), false);
|
||||
assert!(false);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_files_in_root() {
|
||||
let gitignore = get_gitignore();
|
||||
let m = |path: &str| gitignore.matched_path_or_any_parents(Path::new(path), false);
|
||||
|
||||
// 0x
|
||||
assert!(m("ROOT/file_root_00").is_ignore());
|
||||
assert!(m("ROOT/file_root_01").is_none());
|
||||
assert!(m("ROOT/file_root_02").is_none());
|
||||
assert!(m("ROOT/file_root_03").is_none());
|
||||
|
||||
// 1x
|
||||
assert!(m("ROOT/file_root_10").is_ignore());
|
||||
assert!(m("ROOT/file_root_11").is_none());
|
||||
assert!(m("ROOT/file_root_12").is_none());
|
||||
assert!(m("ROOT/file_root_13").is_none());
|
||||
|
||||
// 2x
|
||||
assert!(m("ROOT/file_root_20").is_none());
|
||||
assert!(m("ROOT/file_root_21").is_none());
|
||||
assert!(m("ROOT/file_root_22").is_none());
|
||||
assert!(m("ROOT/file_root_23").is_none());
|
||||
|
||||
// 3x
|
||||
assert!(m("ROOT/file_root_30").is_ignore());
|
||||
assert!(m("ROOT/file_root_31").is_none());
|
||||
assert!(m("ROOT/file_root_32").is_none());
|
||||
assert!(m("ROOT/file_root_33").is_none());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_files_in_deep() {
|
||||
let gitignore = get_gitignore();
|
||||
let m = |path: &str| gitignore.matched_path_or_any_parents(Path::new(path), false);
|
||||
|
||||
// 0x
|
||||
assert!(m("ROOT/parent_dir/file_deep_00").is_ignore());
|
||||
assert!(m("ROOT/parent_dir/file_deep_01").is_none());
|
||||
assert!(m("ROOT/parent_dir/file_deep_02").is_none());
|
||||
assert!(m("ROOT/parent_dir/file_deep_03").is_none());
|
||||
|
||||
// 1x
|
||||
assert!(m("ROOT/parent_dir/file_deep_10").is_none());
|
||||
assert!(m("ROOT/parent_dir/file_deep_11").is_none());
|
||||
assert!(m("ROOT/parent_dir/file_deep_12").is_none());
|
||||
assert!(m("ROOT/parent_dir/file_deep_13").is_none());
|
||||
|
||||
// 2x
|
||||
assert!(m("ROOT/parent_dir/file_deep_20").is_ignore());
|
||||
assert!(m("ROOT/parent_dir/file_deep_21").is_none());
|
||||
assert!(m("ROOT/parent_dir/file_deep_22").is_none());
|
||||
assert!(m("ROOT/parent_dir/file_deep_23").is_none());
|
||||
|
||||
// 3x
|
||||
assert!(m("ROOT/parent_dir/file_deep_30").is_ignore());
|
||||
assert!(m("ROOT/parent_dir/file_deep_31").is_none());
|
||||
assert!(m("ROOT/parent_dir/file_deep_32").is_none());
|
||||
assert!(m("ROOT/parent_dir/file_deep_33").is_none());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_dirs_in_root() {
|
||||
let gitignore = get_gitignore();
|
||||
let m =
|
||||
|path: &str, is_dir: bool| gitignore.matched_path_or_any_parents(Path::new(path), is_dir);
|
||||
|
||||
// 00
|
||||
assert!(m("ROOT/dir_root_00", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_00/file", false).is_ignore());
|
||||
assert!(m("ROOT/dir_root_00/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_00/child_dir/file", false).is_ignore());
|
||||
|
||||
// 01
|
||||
assert!(m("ROOT/dir_root_01", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_01/file", false).is_ignore());
|
||||
assert!(m("ROOT/dir_root_01/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_01/child_dir/file", false).is_ignore());
|
||||
|
||||
// 02
|
||||
assert!(m("ROOT/dir_root_02", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/dir_root_02/file", false).is_ignore());
|
||||
assert!(m("ROOT/dir_root_02/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_02/child_dir/file", false).is_ignore());
|
||||
|
||||
// 03
|
||||
assert!(m("ROOT/dir_root_03", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/dir_root_03/file", false).is_ignore());
|
||||
assert!(m("ROOT/dir_root_03/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_03/child_dir/file", false).is_ignore());
|
||||
|
||||
// 10
|
||||
assert!(m("ROOT/dir_root_10", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_10/file", false).is_ignore());
|
||||
assert!(m("ROOT/dir_root_10/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_10/child_dir/file", false).is_ignore());
|
||||
|
||||
// 11
|
||||
assert!(m("ROOT/dir_root_11", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_11/file", false).is_ignore());
|
||||
assert!(m("ROOT/dir_root_11/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_11/child_dir/file", false).is_ignore());
|
||||
|
||||
// 12
|
||||
assert!(m("ROOT/dir_root_12", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/dir_root_12/file", false).is_ignore());
|
||||
assert!(m("ROOT/dir_root_12/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_12/child_dir/file", false).is_ignore());
|
||||
|
||||
// 13
|
||||
assert!(m("ROOT/dir_root_13", true).is_none());
|
||||
assert!(m("ROOT/dir_root_13/file", false).is_ignore());
|
||||
assert!(m("ROOT/dir_root_13/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_13/child_dir/file", false).is_ignore());
|
||||
|
||||
// 20
|
||||
assert!(m("ROOT/dir_root_20", true).is_none());
|
||||
assert!(m("ROOT/dir_root_20/file", false).is_none());
|
||||
assert!(m("ROOT/dir_root_20/child_dir", true).is_none());
|
||||
assert!(m("ROOT/dir_root_20/child_dir/file", false).is_none());
|
||||
|
||||
// 21
|
||||
assert!(m("ROOT/dir_root_21", true).is_none());
|
||||
assert!(m("ROOT/dir_root_21/file", false).is_none());
|
||||
assert!(m("ROOT/dir_root_21/child_dir", true).is_none());
|
||||
assert!(m("ROOT/dir_root_21/child_dir/file", false).is_none());
|
||||
|
||||
// 22
|
||||
assert!(m("ROOT/dir_root_22", true).is_none());
|
||||
assert!(m("ROOT/dir_root_22/file", false).is_none());
|
||||
assert!(m("ROOT/dir_root_22/child_dir", true).is_none());
|
||||
assert!(m("ROOT/dir_root_22/child_dir/file", false).is_none());
|
||||
|
||||
// 23
|
||||
assert!(m("ROOT/dir_root_23", true).is_none());
|
||||
assert!(m("ROOT/dir_root_23/file", false).is_none());
|
||||
assert!(m("ROOT/dir_root_23/child_dir", true).is_none());
|
||||
assert!(m("ROOT/dir_root_23/child_dir/file", false).is_none());
|
||||
|
||||
// 30
|
||||
assert!(m("ROOT/dir_root_30", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_30/file", false).is_ignore());
|
||||
assert!(m("ROOT/dir_root_30/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_30/child_dir/file", false).is_ignore());
|
||||
|
||||
// 31
|
||||
assert!(m("ROOT/dir_root_31", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_31/file", false).is_ignore());
|
||||
assert!(m("ROOT/dir_root_31/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_31/child_dir/file", false).is_ignore());
|
||||
|
||||
// 32
|
||||
assert!(m("ROOT/dir_root_32", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/dir_root_32/file", false).is_ignore());
|
||||
assert!(m("ROOT/dir_root_32/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_32/child_dir/file", false).is_ignore());
|
||||
|
||||
// 33
|
||||
assert!(m("ROOT/dir_root_33", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/dir_root_33/file", false).is_ignore());
|
||||
assert!(m("ROOT/dir_root_33/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/dir_root_33/child_dir/file", false).is_ignore());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_dirs_in_deep() {
|
||||
let gitignore = get_gitignore();
|
||||
let m =
|
||||
|path: &str, is_dir: bool| gitignore.matched_path_or_any_parents(Path::new(path), is_dir);
|
||||
|
||||
// 00
|
||||
assert!(m("ROOT/parent_dir/dir_deep_00", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_00/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_00/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_00/child_dir/file", false).is_ignore());
|
||||
|
||||
// 01
|
||||
assert!(m("ROOT/parent_dir/dir_deep_01", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_01/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_01/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_01/child_dir/file", false).is_ignore());
|
||||
|
||||
// 02
|
||||
assert!(m("ROOT/parent_dir/dir_deep_02", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_02/file", false).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_02/child_dir", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_02/child_dir/file", false).is_none());
|
||||
|
||||
// 03
|
||||
assert!(m("ROOT/parent_dir/dir_deep_03", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_03/file", false).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_03/child_dir", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_03/child_dir/file", false).is_none());
|
||||
|
||||
// 10
|
||||
assert!(m("ROOT/parent_dir/dir_deep_10", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_10/file", false).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_10/child_dir", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_10/child_dir/file", false).is_none());
|
||||
|
||||
// 11
|
||||
assert!(m("ROOT/parent_dir/dir_deep_11", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_11/file", false).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_11/child_dir", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_11/child_dir/file", false).is_none());
|
||||
|
||||
// 12
|
||||
assert!(m("ROOT/parent_dir/dir_deep_12", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_12/file", false).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_12/child_dir", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_12/child_dir/file", false).is_none());
|
||||
|
||||
// 13
|
||||
assert!(m("ROOT/parent_dir/dir_deep_13", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_13/file", false).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_13/child_dir", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_13/child_dir/file", false).is_none());
|
||||
|
||||
// 20
|
||||
assert!(m("ROOT/parent_dir/dir_deep_20", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_20/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_20/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_20/child_dir/file", false).is_ignore());
|
||||
|
||||
// 21
|
||||
assert!(m("ROOT/parent_dir/dir_deep_21", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_21/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_21/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_21/child_dir/file", false).is_ignore());
|
||||
|
||||
// 22
|
||||
assert!(m("ROOT/parent_dir/dir_deep_22", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_22/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_22/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_22/child_dir/file", false).is_ignore());
|
||||
|
||||
// 23
|
||||
assert!(m("ROOT/parent_dir/dir_deep_23", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_23/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_23/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_23/child_dir/file", false).is_ignore());
|
||||
|
||||
// 30
|
||||
assert!(m("ROOT/parent_dir/dir_deep_30", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_30/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_30/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_30/child_dir/file", false).is_ignore());
|
||||
|
||||
// 31
|
||||
assert!(m("ROOT/parent_dir/dir_deep_31", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_31/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_31/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_31/child_dir/file", false).is_ignore());
|
||||
|
||||
// 32
|
||||
assert!(m("ROOT/parent_dir/dir_deep_32", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_32/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_32/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_32/child_dir/file", false).is_ignore());
|
||||
|
||||
// 33
|
||||
assert!(m("ROOT/parent_dir/dir_deep_33", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_33/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_33/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_33/child_dir/file", false).is_ignore());
|
||||
}
|
||||
pkg/archlinux/.gitignore (vendored, 4 lines)
@@ -1,4 +0,0 @@
|
||||
*.xz
|
||||
src
|
||||
pkg
|
||||
*.gz
|
||||
@@ -1,37 +0,0 @@
|
||||
# Contributor: Andrew Gallant <jamslam@gmail.com>
|
||||
# Maintainer: Andrew Gallant
|
||||
pkgname=ripgrep
|
||||
pkgver=0.2.3
|
||||
pkgrel=1
|
||||
pkgdesc="A search tool that combines the usability of The Silver Searcher with the raw speed of grep."
|
||||
arch=('i686' 'x86_64')
|
||||
url="https://github.com/BurntSushi/ripgrep"
|
||||
license=('UNLICENSE')
|
||||
makedepends=('cargo')
|
||||
source=("https://github.com/BurntSushi/$pkgname/archive/$pkgver.tar.gz")
|
||||
sha256sums=('a88531558d2023df76190ea2e52bee50d739eabece8a57df29abbad0c6bdb917')
|
||||
|
||||
build() {
|
||||
cd "$pkgname-$pkgver"
|
||||
if command -v rustup > /dev/null 2>&1; then
|
||||
RUSTFLAGS="-C target-cpu=native" rustup run nightly \
|
||||
cargo build --release --features simd-accel
|
||||
elif rustc --version | grep -q nightly; then
|
||||
RUSTFLAGS="-C target-cpu=native" \
|
||||
cargo build --release --features simd-accel
|
||||
else
|
||||
cargo build --release
|
||||
fi
|
||||
}
|
||||
|
||||
package() {
|
||||
cd "$pkgname-$pkgver"
|
||||
|
||||
install -Dm755 "target/release/rg" "$pkgdir/usr/bin/rg"
|
||||
install -Dm644 "doc/rg.1" "$pkgdir/usr/share/man/man1/rg.1"
|
||||
install -Dm644 "README.md" "$pkgdir/usr/share/doc/ripgrep/README.md"
|
||||
install -Dm644 "COPYING" "$pkgdir/usr/share/doc/ripgrep/COPYING"
|
||||
install -Dm644 "LICENSE-MIT" "$pkgdir/usr/share/doc/ripgrep/LICENSE-MIT"
|
||||
install -Dm644 "UNLICENSE" "$pkgdir/usr/share/doc/ripgrep/UNLICENSE"
|
||||
install -Dm644 "CHANGELOG.md" "$pkgdir/usr/share/doc/ripgrep/CHANGELOG.md"
|
||||
}
|
||||
@@ -1,17 +1,23 @@
|
||||
class RipgrepBin < Formula
|
||||
version '0.5.0'
|
||||
desc "Search tool like grep and The Silver Searcher."
|
||||
version '0.8.1'
|
||||
desc "Recursively search directories for a regex pattern."
|
||||
homepage "https://github.com/BurntSushi/ripgrep"
|
||||
|
||||
if OS.mac?
|
||||
url "https://github.com/BurntSushi/ripgrep/releases/download/#{version}/ripgrep-#{version}-x86_64-apple-darwin.tar.gz"
|
||||
sha256 "efdc64456fc64910c2998b25c402aac2ff17caf1507e8bcca4584b429ef7f7ae"
|
||||
sha256 "71f8d2907b473e5fc30159b822b0f1de247634ee292d5cc3fa1bb80222e0f613"
|
||||
elsif OS.linux?
|
||||
url "https://github.com/BurntSushi/ripgrep/releases/download/#{version}/ripgrep-#{version}-x86_64-unknown-linux-musl.tar.gz"
|
||||
sha256 "08b1aa1440a23a88c94cff41a860340ecf38e9108817aff30ff778c00c63eb76"
|
||||
end
|
||||
|
||||
conflicts_with "ripgrep"
|
||||
|
||||
def install
|
||||
bin.install "rg"
|
||||
man1.install "rg.1"
|
||||
man1.install "doc/rg.1"
|
||||
|
||||
bash_completion.install "complete/rg.bash-completion"
|
||||
bash_completion.install "complete/rg.bash"
|
||||
fish_completion.install "complete/rg.fish"
|
||||
zsh_completion.install "complete/_rg"
|
||||
end
|
||||
|
||||
snapcraft.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
name: ripgrep # you probably want to 'snapcraft register <name>'
|
||||
version: '0.8.1' # just for humans, typically '1.2+git' or '1.3.2'
|
||||
summary: Fast file searcher # 79 char long summary
|
||||
description: |
|
||||
ripgrep combines the usability of The Silver Searcher with the raw speed of grep.
|
||||
grade: stable # must be 'stable' to release into candidate/stable channels
|
||||
confinement: classic # use 'strict' once you have the right plugs and slots
|
||||
parts:
|
||||
ripgrep:
|
||||
plugin: rust
|
||||
source: .
|
||||
apps:
|
||||
rg:
|
||||
command: env PATH=$SNAP/bin:$PATH rg
|
||||
src/app.rs (2134 lines changed; file diff suppressed because it is too large)
src/args.rs (501 lines changed)
@@ -3,15 +3,12 @@ use std::env;
|
||||
use std::ffi::OsStr;
|
||||
use std::fs;
|
||||
use std::io::{self, BufRead};
|
||||
use std::ops;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
use clap;
|
||||
use encoding_rs::Encoding;
|
||||
use env_logger;
|
||||
use grep::{Grep, GrepBuilder};
|
||||
use log;
|
||||
use num_cpus;
|
||||
@@ -28,6 +25,8 @@ use printer::{ColorSpecs, Printer};
|
||||
use unescape::unescape;
|
||||
use worker::{Worker, WorkerBuilder};
|
||||
|
||||
use config;
|
||||
use logger::Logger;
|
||||
use Result;
|
||||
|
||||
/// `Args` are transformed/normalized from `ArgMatches`.
|
||||
@@ -36,12 +35,13 @@ pub struct Args {
|
||||
paths: Vec<PathBuf>,
|
||||
after_context: usize,
|
||||
before_context: usize,
|
||||
color: bool,
|
||||
byte_offset: bool,
|
||||
color_choice: termcolor::ColorChoice,
|
||||
colors: ColorSpecs,
|
||||
column: bool,
|
||||
context_separator: Vec<u8>,
|
||||
count: bool,
|
||||
count_matches: bool,
|
||||
encoding: Option<&'static Encoding>,
|
||||
files_with_matches: bool,
|
||||
files_without_matches: bool,
|
||||
@@ -56,6 +56,7 @@ pub struct Args {
|
||||
invert_match: bool,
|
||||
line_number: bool,
|
||||
line_per_match: bool,
|
||||
line_number_width: Option<usize>,
|
||||
max_columns: Option<usize>,
|
||||
max_count: Option<u64>,
|
||||
max_filesize: Option<u64>,
|
||||
@@ -66,6 +67,7 @@ pub struct Args {
|
||||
no_ignore_vcs: bool,
|
||||
no_messages: bool,
|
||||
null: bool,
|
||||
only_matching: bool,
|
||||
path_separator: Option<u8>,
|
||||
quiet: bool,
|
||||
quiet_matched: QuietMatched,
|
||||
@@ -77,6 +79,8 @@ pub struct Args {
|
||||
type_list: bool,
|
||||
types: Types,
|
||||
with_filename: bool,
|
||||
search_zip_files: bool,
|
||||
stats: bool
|
||||
}
|
||||
|
||||
impl Args {
|
||||
@@ -88,37 +92,59 @@ impl Args {
|
||||
///
|
||||
/// Also, initialize a global logger.
|
||||
pub fn parse() -> Result<Args> {
|
||||
use clap::ErrorKind::*;
|
||||
// We parse the args given on CLI. This does not include args from
|
||||
// the config. We use the CLI args as an initial configuration while
|
||||
// trying to parse config files. If a config file exists and has
|
||||
// arguments, then we re-parse argv, otherwise we just use the matches
|
||||
// we have here.
|
||||
let early_matches = ArgMatches(app::app().get_matches());
|
||||
|
||||
let matches = match app::app_short().get_matches_safe() {
|
||||
Ok(matches) => matches,
|
||||
Err(clap::Error { kind: HelpDisplayed, .. }) => {
|
||||
let _ = ::app::app_long().print_help();
|
||||
println!("");
|
||||
process::exit(0);
|
||||
}
|
||||
Err(err) => err.exit(),
|
||||
};
|
||||
if matches.is_present("help-short") {
|
||||
let _ = ::app::app_short().print_help();
|
||||
println!("");
|
||||
process::exit(0);
|
||||
}
|
||||
if matches.is_present("ripgrep-version") {
|
||||
println!("ripgrep {}", crate_version!());
|
||||
process::exit(0);
|
||||
}
|
||||
|
||||
let mut logb = env_logger::LogBuilder::new();
|
||||
if matches.is_present("debug") {
|
||||
logb.filter(None, log::LogLevelFilter::Debug);
|
||||
} else {
|
||||
logb.filter(None, log::LogLevelFilter::Warn);
|
||||
}
|
||||
if let Err(err) = logb.init() {
|
||||
if let Err(err) = Logger::init() {
|
||||
errored!("failed to initialize logger: {}", err);
|
||||
}
|
||||
ArgMatches(matches).to_args()
|
||||
if early_matches.is_present("debug") {
|
||||
log::set_max_level(log::LevelFilter::Debug);
|
||||
} else {
|
||||
log::set_max_level(log::LevelFilter::Warn);
|
||||
}
|
||||
|
||||
let matches = Args::matches(early_matches);
|
||||
// The logging level may have changed if we brought in additional
|
||||
// arguments from a configuration file, so recheck it and set the log
|
||||
// level as appropriate.
|
||||
if matches.is_present("debug") {
|
||||
log::set_max_level(log::LevelFilter::Debug);
|
||||
} else {
|
||||
log::set_max_level(log::LevelFilter::Warn);
|
||||
}
|
||||
matches.to_args()
|
||||
}
|
||||
|
||||
/// Run clap and return the matches. If clap determines a problem with the
|
||||
/// user provided arguments (or if --help or --version are given), then an
|
||||
/// error/usage/version will be printed and the process will exit.
|
||||
///
|
||||
/// If there are no additional arguments from the environment (e.g., a
|
||||
/// config file), then the given matches are returned as is.
|
||||
fn matches(early_matches: ArgMatches<'static>) -> ArgMatches<'static> {
|
||||
// If the end user says no config, then respect it.
|
||||
if early_matches.is_present("no-config") {
|
||||
debug!("not reading config files because --no-config is present");
|
||||
return early_matches;
|
||||
}
|
||||
// If the user wants ripgrep to use a config file, then parse args
|
||||
// from that first.
|
||||
let mut args = config::args(early_matches.is_present("no-messages"));
|
||||
if args.is_empty() {
|
||||
return early_matches;
|
||||
}
|
||||
let mut cliargs = env::args_os();
|
||||
if let Some(bin) = cliargs.next() {
|
||||
args.insert(0, bin);
|
||||
}
|
||||
args.extend(cliargs);
|
||||
debug!("final argv: {:?}", args);
|
||||
ArgMatches(app::app().get_matches_from(args))
|
||||
}
|
||||
|
||||
/// Returns true if ripgrep should print the files it will search and exit
|
||||
@@ -161,9 +187,11 @@ impl Args {
|
||||
.heading(self.heading)
|
||||
.line_per_match(self.line_per_match)
|
||||
.null(self.null)
|
||||
.only_matching(self.only_matching)
|
||||
.path_separator(self.path_separator)
|
||||
.with_filename(self.with_filename)
|
||||
.max_columns(self.max_columns);
|
||||
.max_columns(self.max_columns)
|
||||
.line_number_width(self.line_number_width);
|
||||
if let Some(ref rep) = self.replace {
|
||||
p = p.replace(rep.clone());
|
||||
}
|
||||
@@ -172,14 +200,17 @@ impl Args {
|
||||
|
||||
/// Retrieve the configured file separator.
|
||||
pub fn file_separator(&self) -> Option<Vec<u8>> {
|
||||
let use_heading_sep =
|
||||
self.heading
|
||||
&& !self.count
|
||||
&& !self.files_with_matches
|
||||
&& !self.files_without_matches;
|
||||
let contextless =
|
||||
self.count
|
||||
|| self.count_matches
|
||||
|| self.files_with_matches
|
||||
|| self.files_without_matches;
|
||||
let use_heading_sep = self.heading && !contextless;
|
||||
|
||||
if use_heading_sep {
|
||||
Some(b"".to_vec())
|
||||
} else if self.before_context > 0 || self.after_context > 0 {
|
||||
} else if !contextless
|
||||
&& (self.before_context > 0 || self.after_context > 0) {
|
||||
Some(self.context_separator.clone())
|
||||
} else {
|
||||
None
|
||||
@@ -191,6 +222,12 @@ impl Args {
|
||||
self.max_count == Some(0)
|
||||
}
|
||||
|
||||
|
||||
/// Returns whether ripgrep should track stats for this run
|
||||
pub fn stats(&self) -> bool {
|
||||
self.stats
|
||||
}
|
||||
|
||||
/// Create a new writer for single-threaded searching with color support.
|
||||
pub fn stdout(&self) -> termcolor::StandardStream {
|
||||
termcolor::StandardStream::stdout(self.color_choice)
|
||||
@@ -223,7 +260,7 @@ impl Args {
|
||||
/// Returns true if there is exactly one file path given to search.
|
||||
pub fn is_one_path(&self) -> bool {
|
||||
self.paths.len() == 1
|
||||
&& (self.paths[0] == Path::new("-") || self.paths[0].is_file())
|
||||
&& (self.paths[0] == Path::new("-") || path_is_file(&self.paths[0]))
|
||||
}
|
||||
|
||||
/// Create a worker whose configuration is taken from the
|
||||
@@ -232,7 +269,9 @@ impl Args {
|
||||
WorkerBuilder::new(self.grep())
|
||||
.after_context(self.after_context)
|
||||
.before_context(self.before_context)
|
||||
.byte_offset(self.byte_offset)
|
||||
.count(self.count)
|
||||
.count_matches(self.count_matches)
|
||||
.encoding(self.encoding)
|
||||
.files_with_matches(self.files_with_matches)
|
||||
.files_without_matches(self.files_without_matches)
|
||||
@@ -244,6 +283,7 @@ impl Args {
|
||||
.no_messages(self.no_messages)
|
||||
.quiet(self.quiet)
|
||||
.text(self.text)
|
||||
.search_zip_files(self.search_zip_files)
|
||||
.build()
|
||||
}
|
||||
|
||||
@@ -303,10 +343,13 @@ impl Args {
|
||||
wd.git_ignore(!self.no_ignore && !self.no_ignore_vcs);
|
||||
wd.git_exclude(!self.no_ignore && !self.no_ignore_vcs);
|
||||
wd.ignore(!self.no_ignore);
|
||||
if !self.no_ignore {
|
||||
wd.add_custom_ignore_filename(".rgignore");
|
||||
}
|
||||
wd.parents(!self.no_ignore_parent);
|
||||
wd.threads(self.threads());
|
||||
if self.sort_files {
|
||||
wd.sort_by(|a, b| a.cmp(b));
|
||||
wd.sort_by_file_name(|a, b| a.cmp(b));
|
||||
}
|
||||
wd
|
||||
}
|
||||
@@ -316,66 +359,67 @@ impl Args {
|
||||
/// several options/flags.
|
||||
struct ArgMatches<'a>(clap::ArgMatches<'a>);
|
||||
|
||||
impl<'a> ops::Deref for ArgMatches<'a> {
|
||||
type Target = clap::ArgMatches<'a>;
|
||||
fn deref(&self) -> &clap::ArgMatches<'a> { &self.0 }
|
||||
}
|
||||
|
||||
impl<'a> ArgMatches<'a> {
|
||||
/// Convert the result of parsing CLI arguments into ripgrep's
|
||||
/// configuration.
|
||||
fn to_args(&self) -> Result<Args> {
|
||||
let paths = self.paths();
|
||||
let line_number = self.line_number(&paths);
|
||||
let mmap = try!(self.mmap(&paths));
|
||||
let mmap = self.mmap(&paths)?;
|
||||
let with_filename = self.with_filename(&paths);
|
||||
let (before_context, after_context) = try!(self.contexts());
|
||||
let (before_context, after_context) = self.contexts()?;
|
||||
let (count, count_matches) = self.counts();
|
||||
let quiet = self.is_present("quiet");
|
||||
let args = Args {
|
||||
paths: paths,
|
||||
after_context: after_context,
|
||||
before_context: before_context,
|
||||
color: self.color(),
|
||||
byte_offset: self.is_present("byte-offset"),
|
||||
color_choice: self.color_choice(),
|
||||
colors: try!(self.color_specs()),
|
||||
colors: self.color_specs()?,
|
||||
column: self.column(),
|
||||
context_separator: self.context_separator(),
|
||||
count: self.is_present("count"),
|
||||
encoding: try!(self.encoding()),
|
||||
count: count,
|
||||
count_matches: count_matches,
|
||||
encoding: self.encoding()?,
|
||||
files_with_matches: self.is_present("files-with-matches"),
|
||||
files_without_matches: self.is_present("files-without-match"),
|
||||
eol: b'\n',
|
||||
files: self.is_present("files"),
|
||||
follow: self.is_present("follow"),
|
||||
glob_overrides: try!(self.overrides()),
|
||||
grep: try!(self.grep()),
|
||||
glob_overrides: self.overrides()?,
|
||||
grep: self.grep()?,
|
||||
heading: self.heading(),
|
||||
hidden: self.hidden(),
|
||||
ignore_files: self.ignore_files(),
|
||||
invert_match: self.is_present("invert-match"),
|
||||
line_number: line_number,
|
||||
line_number_width: try!(self.usize_of("line-number-width")),
|
||||
line_per_match: self.is_present("vimgrep"),
|
||||
max_columns: try!(self.usize_of("max-columns")),
|
||||
max_count: try!(self.usize_of("max-count")).map(|max| max as u64),
|
||||
max_filesize: try!(self.max_filesize()),
|
||||
maxdepth: try!(self.usize_of("maxdepth")),
|
||||
max_columns: self.usize_of_nonzero("max-columns")?,
|
||||
max_count: self.usize_of("max-count")?.map(|n| n as u64),
|
||||
max_filesize: self.max_filesize()?,
|
||||
maxdepth: self.usize_of("maxdepth")?,
|
||||
mmap: mmap,
|
||||
no_ignore: self.no_ignore(),
|
||||
no_ignore_parent: self.no_ignore_parent(),
|
||||
no_ignore_vcs: self.no_ignore_vcs(),
|
||||
no_messages: self.is_present("no-messages"),
|
||||
null: self.is_present("null"),
|
||||
path_separator: try!(self.path_separator()),
|
||||
only_matching: self.is_present("only-matching"),
|
||||
path_separator: self.path_separator()?,
|
||||
quiet: quiet,
|
||||
quiet_matched: QuietMatched::new(quiet),
|
||||
replace: self.replace(),
|
||||
sort_files: self.is_present("sort-files"),
|
||||
stdout_handle: self.stdout_handle(),
|
||||
text: self.text(),
|
||||
threads: try!(self.threads()),
|
||||
threads: self.threads()?,
|
||||
type_list: self.is_present("type-list"),
|
||||
types: try!(self.types()),
|
||||
types: self.types()?,
|
||||
with_filename: with_filename,
|
||||
search_zip_files: self.is_present("search-zip"),
|
||||
stats: self.stats()
|
||||
};
|
||||
if args.mmap {
|
||||
debug!("will try to use memory maps");
|
||||
@@ -438,7 +482,7 @@ impl<'a> ArgMatches<'a> {
|
||||
/// If any part of the pattern isn't valid UTF-8, then an error is
|
||||
/// returned.
|
||||
fn pattern(&self) -> Result<String> {
|
||||
Ok(try!(self.patterns()).join("|"))
|
||||
Ok(self.patterns()?.join("|"))
|
||||
}
|
||||
|
||||
/// Get a sequence of all available patterns from the command line.
|
||||
@@ -446,7 +490,10 @@ impl<'a> ArgMatches<'a> {
|
||||
///
|
||||
/// Note that if -F/--fixed-strings is set, then all patterns will be
|
||||
/// escaped. Similarly, if -w/--word-regexp is set, then all patterns
|
||||
/// are surrounded by `\b`.
|
||||
/// are surrounded by `\b`, and if -x/--line-regexp is set, then all
|
||||
/// patterns are surrounded by `^...$`. Finally, if --passthru is set,
|
||||
/// the pattern `^` is added to the end (to ensure that it works as
|
||||
/// expected with multiple -e/-f patterns).
|
||||
///
|
||||
/// If any pattern is invalid UTF-8, then an error is returned.
|
||||
fn patterns(&self) -> Result<Vec<String>> {
|
||||
@@ -458,13 +505,13 @@ impl<'a> ArgMatches<'a> {
|
||||
None => {
|
||||
if self.values_of_os("file").is_none() {
|
||||
if let Some(os_pat) = self.value_of_os("pattern") {
|
||||
pats.push(try!(self.os_str_pattern(os_pat)));
|
||||
pats.push(self.os_str_pattern(os_pat)?);
|
||||
}
|
||||
}
|
||||
}
|
||||
Some(os_pats) => {
|
||||
for os_pat in os_pats {
|
||||
pats.push(try!(self.os_str_pattern(os_pat)));
|
||||
pats.push(self.os_str_pattern(os_pat)?);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -473,35 +520,41 @@ impl<'a> ArgMatches<'a> {
|
||||
if file == "-" {
|
||||
let stdin = io::stdin();
|
||||
for line in stdin.lock().lines() {
|
||||
pats.push(self.str_pattern(&try!(line)));
|
||||
pats.push(self.str_pattern(&line?));
|
||||
}
|
||||
} else {
|
||||
let f = try!(fs::File::open(file));
|
||||
let f = fs::File::open(file)?;
|
||||
for line in io::BufReader::new(f).lines() {
|
||||
pats.push(self.str_pattern(&try!(line)));
|
||||
pats.push(self.str_pattern(&line?));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if pats.is_empty() {
|
||||
// It's important that this be at the end; otherwise it would always
|
||||
// match first, and we wouldn't get colours in the output
|
||||
if self.is_present("passthru") && !self.is_present("count") {
|
||||
pats.push("^".to_string())
|
||||
} else if pats.is_empty() {
|
||||
pats.push(self.empty_pattern())
|
||||
}
|
||||
Ok(pats)
|
||||
}
|
||||
|
||||
/// Converts an OsStr pattern to a String pattern, including word
|
||||
/// Converts an OsStr pattern to a String pattern, including line/word
|
||||
/// boundaries or escapes if applicable.
|
||||
///
|
||||
/// If the pattern is not valid UTF-8, then an error is returned.
|
||||
fn os_str_pattern(&self, pat: &OsStr) -> Result<String> {
|
||||
let s = try!(pattern_to_str(pat));
|
||||
let s = pattern_to_str(pat)?;
|
||||
Ok(self.str_pattern(s))
|
||||
}
|
||||
|
||||
/// Converts a &str pattern to a String pattern, including word
|
||||
/// Converts a &str pattern to a String pattern, including line/word
|
||||
/// boundaries or escapes if applicable.
|
||||
fn str_pattern(&self, pat: &str) -> String {
|
||||
let s = self.word_pattern(self.literal_pattern(pat.to_string()));
|
||||
let litpat = self.literal_pattern(pat.to_string());
|
||||
let s = self.line_pattern(self.word_pattern(litpat));
|
||||
|
||||
if s.is_empty() {
|
||||
self.empty_pattern()
|
||||
} else {
|
||||
@@ -524,7 +577,17 @@ impl<'a> ArgMatches<'a> {
|
||||
/// flag is set. Otherwise, the pattern is returned unchanged.
|
||||
fn word_pattern(&self, pat: String) -> String {
|
||||
if self.is_present("word-regexp") {
|
||||
format!(r"\b{}\b", pat)
|
||||
format!(r"\b(?:{})\b", pat)
|
||||
} else {
|
||||
pat
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the given pattern as a line pattern if the -x/--line-regexp
|
||||
/// flag is set. Otherwise, the pattern is returned unchanged.
|
||||
fn line_pattern(&self, pat: String) -> String {
|
||||
if self.is_present("line-regexp") {
|
||||
format!(r"^(?:{})$", pat)
|
||||
} else {
|
||||
pat
|
||||
}
|
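// Standalone illustration (assuming the regex crate) of why the two helpers
// above wrap the pattern in a non-capturing group before anchoring it: with an
// alternation such as `foo|bar`, naive wrapping would anchor only the first
// and last branch.
extern crate regex;

use regex::Regex;

fn main() {
    let naive = Regex::new(r"\bfoo|bar\b").unwrap();
    let grouped = Regex::new(r"\b(?:foo|bar)\b").unwrap();

    // "rebar" is not a whole-word match for either alternative, but the naive
    // form accepts it because `bar\b` stands alone as a complete branch.
    assert!(naive.is_match("rebar"));
    assert!(!grouped.is_match("rebar"));
}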
||||
@@ -536,7 +599,7 @@ impl<'a> ArgMatches<'a> {
|
||||
// This would normally just be an empty string, which works on its
|
||||
// own, but if the patterns are joined in a set of alternations, then
|
||||
// you wind up with `foo|`, which is invalid.
|
||||
self.word_pattern("z{0}".to_string())
|
||||
self.word_pattern("(?:z{0})*".to_string())
|
||||
}
|
||||
|
||||
/// Returns true if and only if file names containing each match should
|
||||
@@ -549,8 +612,9 @@ impl<'a> ArgMatches<'a> {
|
||||
false
|
||||
} else {
|
||||
self.is_present("with-filename")
|
||||
|| self.is_present("vimgrep")
|
||||
|| paths.len() > 1
|
||||
|| paths.get(0).map_or(false, |p| p.is_dir())
|
||||
|| paths.get(0).map_or(false, |p| path_is_dir(p))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -581,8 +645,8 @@ impl<'a> ArgMatches<'a> {
|
||||
/// `paths` should be a slice of all top-level file paths that ripgrep
|
||||
/// will need to search.
|
||||
fn mmap(&self, paths: &[PathBuf]) -> Result<bool> {
|
||||
let (before, after) = try!(self.contexts());
|
||||
let enc = try!(self.encoding());
|
||||
let (before, after) = self.contexts()?;
|
||||
let enc = self.encoding()?;
|
||||
Ok(if before > 0 || after > 0 || self.is_present("no-mmap") {
|
||||
false
|
||||
} else if self.is_present("mmap") {
|
||||
@@ -597,7 +661,7 @@ impl<'a> ArgMatches<'a> {
|
||||
} else {
|
||||
// If we're only searching a few paths and all of them are
|
||||
// files, then memory maps are probably faster.
|
||||
paths.len() <= 10 && paths.iter().all(|p| p.is_file())
|
||||
paths.len() <= 10 && paths.iter().all(|p| path_is_file(p))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -606,10 +670,10 @@ impl<'a> ArgMatches<'a> {
|
||||
if self.is_present("no-line-number") || self.is_present("count") {
|
||||
false
|
||||
} else {
|
||||
let only_stdin = paths == &[Path::new("-")];
|
||||
self.is_present("line-number")
|
||||
let only_stdin = paths == [Path::new("-")];
|
||||
(atty::is(atty::Stream::Stdout) && !only_stdin)
|
||||
|| self.is_present("line-number")
|
||||
|| self.is_present("column")
|
||||
|| (atty::is(atty::Stream::Stdout) && !only_stdin)
|
||||
|| self.is_present("pretty")
|
||||
|| self.is_present("vimgrep")
|
||||
}
|
||||
@@ -623,18 +687,18 @@ impl<'a> ArgMatches<'a> {
|
||||
/// Returns true if and only if matches should be grouped with file name
|
||||
/// headings.
|
||||
fn heading(&self) -> bool {
|
||||
if self.is_present("no-heading") {
|
||||
if self.is_present("no-heading") || self.is_present("vimgrep") {
|
||||
false
|
||||
} else {
|
||||
self.is_present("heading")
|
||||
|| atty::is(atty::Stream::Stdout)
|
||||
atty::is(atty::Stream::Stdout)
|
||||
|| self.is_present("heading")
|
||||
|| self.is_present("pretty")
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the replacement string as UTF-8 bytes if it exists.
|
||||
fn replace(&self) -> Option<Vec<u8>> {
|
||||
self.value_of_lossy("replace").map(|s| s.into_owned().into_bytes())
|
||||
self.value_of_lossy("replace").map(|s| s.into_bytes())
|
||||
}
|
||||
|
||||
/// Returns the unescaped context separator in UTF-8 bytes.
|
||||
@@ -671,9 +735,9 @@ impl<'a> ArgMatches<'a> {
|
||||
/// If there was a problem parsing the values from the user as an integer,
|
||||
/// then an error is returned.
|
||||
fn contexts(&self) -> Result<(usize, usize)> {
|
||||
let after = try!(self.usize_of("after-context")).unwrap_or(0);
|
||||
let before = try!(self.usize_of("before-context")).unwrap_or(0);
|
||||
let both = try!(self.usize_of("context")).unwrap_or(0);
|
||||
let after = self.usize_of("after-context")?.unwrap_or(0);
|
||||
let before = self.usize_of("before-context")?.unwrap_or(0);
|
||||
let both = self.usize_of("context")?.unwrap_or(0);
|
||||
Ok(if both > 0 {
|
||||
(both, both)
|
||||
} else {
|
||||
@@ -681,36 +745,39 @@ impl<'a> ArgMatches<'a> {
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns true if and only if ripgrep should color its output.
|
||||
fn color(&self) -> bool {
|
||||
let preference = match self.0.value_of_lossy("color") {
|
||||
None => "auto".to_string(),
|
||||
Some(v) => v.into_owned(),
|
||||
};
|
||||
if preference == "always" {
|
||||
true
|
||||
} else if self.is_present("vimgrep") {
|
||||
false
|
||||
} else if preference == "auto" {
|
||||
atty::is(atty::Stream::Stdout) || self.is_present("pretty")
|
||||
/// Returns whether the -c/--count or the --count-matches flags were
|
||||
/// passed from the command line.
|
||||
///
|
||||
/// If --count-matches and --invert-match were passed in, behave
|
||||
/// as if --count and --invert-match were passed in (i.e. rg will
|
||||
/// count inverted matches as per existing behavior).
|
||||
fn counts(&self) -> (bool, bool) {
|
||||
let count = self.is_present("count");
|
||||
let count_matches = self.is_present("count-matches");
|
||||
let invert_matches = self.is_present("invert-match");
|
||||
let only_matching = self.is_present("only-matching");
|
||||
if count_matches && invert_matches {
|
||||
// Treat `-v --count-matches` as `-v -c`.
|
||||
(true, false)
|
||||
} else if count && only_matching {
|
||||
// Treat `-c --only-matching` as `--count-matches`.
|
||||
(false, true)
|
||||
} else {
|
||||
false
|
||||
(count, count_matches)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the user's color choice based on command line parameters and
|
||||
/// environment.
|
||||
fn color_choice(&self) -> termcolor::ColorChoice {
|
||||
let preference = match self.0.value_of_lossy("color") {
|
||||
let preference = match self.value_of_lossy("color") {
|
||||
None => "auto".to_string(),
|
||||
Some(v) => v.into_owned(),
|
||||
Some(v) => v,
|
||||
};
|
||||
if preference == "always" {
|
||||
termcolor::ColorChoice::Always
|
||||
} else if preference == "ansi" {
|
||||
termcolor::ColorChoice::AlwaysAnsi
|
||||
} else if self.is_present("vimgrep") {
|
||||
termcolor::ColorChoice::Never
|
||||
} else if preference == "auto" {
|
||||
if atty::is(atty::Stream::Stdout) || self.is_present("pretty") {
|
||||
termcolor::ColorChoice::Auto
|
||||
@@ -729,13 +796,16 @@ impl<'a> ArgMatches<'a> {
|
||||
fn color_specs(&self) -> Result<ColorSpecs> {
|
||||
// Start with a default set of color specs.
|
||||
let mut specs = vec![
|
||||
#[cfg(unix)]
|
||||
"path:fg:magenta".parse().unwrap(),
|
||||
#[cfg(windows)]
|
||||
"path:fg:cyan".parse().unwrap(),
|
||||
"line:fg:green".parse().unwrap(),
|
||||
"match:fg:red".parse().unwrap(),
|
||||
"match:style:bold".parse().unwrap(),
|
||||
];
|
||||
for spec_str in self.values_of_lossy_vec("colors") {
|
||||
specs.push(try!(spec_str.parse()));
|
||||
specs.push(spec_str.parse()?);
|
||||
}
|
||||
Ok(ColorSpecs::new(&specs))
|
||||
}
|
||||
@@ -748,13 +818,13 @@ impl<'a> ArgMatches<'a> {
|
||||
/// A `None` encoding implies that the encoding should be automatically
|
||||
/// detected on a per-file basis.
|
||||
fn encoding(&self) -> Result<Option<&'static Encoding>> {
|
||||
match self.0.value_of_lossy("encoding") {
|
||||
match self.value_of_lossy("encoding") {
|
||||
None => Ok(None),
|
||||
Some(label) => {
|
||||
if label == "auto" {
|
||||
return Ok(None);
|
||||
}
|
||||
match Encoding::for_label(label.as_bytes()) {
|
||||
match Encoding::for_label_no_replacement(label.as_bytes()) {
|
||||
Some(enc) => Ok(Some(enc)),
|
||||
None => Err(From::from(
|
||||
format!("unsupported encoding: {}", label))),
|
||||
@@ -763,12 +833,25 @@ impl<'a> ArgMatches<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns whether stats should be tracked for this run of ripgrep.
|
||||
|
||||
/// This is automatically disabled if we're asked to only list the
|
||||
/// files that will be searched, files with matches or files
|
||||
/// without matches.
|
||||
fn stats(&self) -> bool {
|
||||
if self.is_present("files-with-matches") ||
|
||||
self.is_present("files-without-match") {
|
||||
return false;
|
||||
}
|
||||
self.is_present("stats")
|
||||
}
|
||||
|
||||
/// Returns the approximate number of threads that ripgrep should use.
|
||||
fn threads(&self) -> Result<usize> {
|
||||
if self.is_present("sort-files") {
|
||||
return Ok(1);
|
||||
}
|
||||
let threads = try!(self.usize_of("threads")).unwrap_or(0);
|
||||
let threads = self.usize_of("threads")?.unwrap_or(0);
|
||||
Ok(if threads == 0 {
|
||||
cmp::min(12, num_cpus::get())
|
||||
} else {
|
||||
@@ -788,19 +871,33 @@ impl<'a> ArgMatches<'a> {
|
||||
let casei =
|
||||
self.is_present("ignore-case")
|
||||
&& !self.is_present("case-sensitive");
|
||||
GrepBuilder::new(&try!(self.pattern()))
|
||||
let mut gb = GrepBuilder::new(&self.pattern()?)
|
||||
.case_smart(smart)
|
||||
.case_insensitive(casei)
|
||||
.line_terminator(b'\n')
|
||||
.build()
|
||||
.map_err(From::from)
|
||||
.line_terminator(b'\n');
|
||||
|
||||
if let Some(limit) = self.dfa_size_limit()? {
|
||||
gb = gb.dfa_size_limit(limit);
|
||||
}
|
||||
if let Some(limit) = self.regex_size_limit()? {
|
||||
gb = gb.size_limit(limit);
|
||||
}
|
||||
Ok(gb.build()?)
|
||||
}
|
||||
|
||||
/// Builds the set of glob overrides from the command line flags.
|
||||
fn overrides(&self) -> Result<Override> {
|
||||
let mut ovr = OverrideBuilder::new(try!(env::current_dir()));
|
||||
let mut ovr = OverrideBuilder::new(env::current_dir()?);
|
||||
for glob in self.values_of_lossy_vec("glob") {
|
||||
try!(ovr.add(&glob));
|
||||
ovr.add(&glob)?;
|
||||
}
|
||||
// This is smelly. In the long run it might make sense
|
||||
// to change OverrideBuilder to be like GlobSetBuilder,
|
||||
// but this would be a breaking change to the ignore crate
|
||||
// so it is being shelved for now...
|
||||
ovr.case_insensitive(true)?;
|
||||
for glob in self.values_of_lossy_vec("iglob") {
|
||||
ovr.add(&glob)?;
|
||||
}
|
||||
ovr.build().map_err(From::from)
|
||||
}
|
||||
@@ -813,7 +910,7 @@ impl<'a> ArgMatches<'a> {
|
||||
btypes.clear(&ty);
|
||||
}
|
||||
for def in self.values_of_lossy_vec("type-add") {
|
||||
try!(btypes.add_def(&def));
|
||||
btypes.add_def(&def)?;
|
||||
}
|
||||
for ty in self.values_of_lossy_vec("type") {
|
||||
btypes.select(&ty);
|
||||
@@ -824,31 +921,64 @@ impl<'a> ArgMatches<'a> {
|
||||
btypes.build().map_err(From::from)
|
||||
}
|
||||
|
||||
/// Parses the max-filesize argument option into a byte count.
|
||||
fn max_filesize(&self) -> Result<Option<u64>> {
|
||||
use regex::Regex;
|
||||
|
||||
let max_filesize = match self.value_of_lossy("max-filesize") {
|
||||
/// Parses an argument of the form `[0-9]+(KMG)?`.
|
||||
///
|
||||
/// This always returns the result as a type `u64`. This must be converted
|
||||
/// to the appropriate type by the caller.
|
||||
fn parse_human_readable_size_arg(
|
||||
&self,
|
||||
arg_name: &str,
|
||||
) -> Result<Option<u64>> {
|
||||
let arg_value = match self.value_of_lossy(arg_name) {
|
||||
Some(x) => x,
|
||||
None => return Ok(None)
|
||||
};
|
||||
let re = regex::Regex::new("^([0-9]+)([KMG])?$").unwrap();
|
||||
let caps =
|
||||
re.captures(&arg_value).ok_or_else(|| {
|
||||
format!("invalid format for {}", arg_name)
|
||||
})?;
|
||||
|
||||
let re = Regex::new("^([0-9]+)([KMG])?$").unwrap();
|
||||
let caps = try!(re.captures(&max_filesize)
|
||||
.ok_or("invalid format for max-filesize argument"));
|
||||
|
||||
let value = try!(caps[1].parse::<u64>().map_err(|err| err.to_string()));
|
||||
let value = caps[1].parse::<u64>()?;
|
||||
let suffix = caps.get(2).map(|x| x.as_str());
|
||||
|
||||
let v_10 = value.checked_mul(1024);
|
||||
let v_20 = v_10.and_then(|x| x.checked_mul(1024));
|
||||
let v_30 = v_20.and_then(|x| x.checked_mul(1024));
|
||||
|
||||
let try_suffix = |x: Option<u64>| {
|
||||
if x.is_some() {
|
||||
Ok(x)
|
||||
} else {
|
||||
Err(From::from(format!("number too large for {}", arg_name)))
|
||||
}
|
||||
};
|
||||
match suffix {
|
||||
None => Ok(Some(value)),
|
||||
Some("K") => Ok(Some(value * 1024)),
|
||||
Some("M") => Ok(Some(value * 1024 * 1024)),
|
||||
Some("G") => Ok(Some(value * 1024 * 1024 * 1024)),
|
||||
_ => Err(From::from("invalid suffix for max-filesize argument"))
|
||||
Some("K") => try_suffix(v_10),
|
||||
Some("M") => try_suffix(v_20),
|
||||
Some("G") => try_suffix(v_30),
|
||||
_ => Err(From::from(format!("invalid suffix for {}", arg_name)))
|
||||
}
|
||||
}
|
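// A standalone sketch of the suffix arithmetic above (function name is
// hypothetical): "2M" denotes 2 * 1024 * 1024 bytes, and an overflowing
// multiplication is reported as an error instead of wrapping.
fn human_size(value: u64, suffix: Option<&str>) -> Result<u64, String> {
    let scaled = match suffix {
        None => Some(value),
        Some("K") => value.checked_mul(1 << 10),
        Some("M") => value.checked_mul(1 << 20),
        Some("G") => value.checked_mul(1 << 30),
        _ => return Err("invalid suffix".to_string()),
    };
    scaled.ok_or_else(|| "number too large".to_string())
}

fn main() {
    assert_eq!(human_size(2, Some("M")), Ok(2 * 1024 * 1024));
    assert!(human_size(u64::max_value(), Some("G")).is_err());
}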
||||
|
||||
/// Parse the dfa-size-limit argument option into a byte count.
|
||||
fn dfa_size_limit(&self) -> Result<Option<usize>> {
|
||||
let r = self.parse_human_readable_size_arg("dfa-size-limit")?;
|
||||
human_readable_to_usize("dfa-size-limit", r)
|
||||
}
|
||||
|
||||
/// Parse the regex-size-limit argument option into a byte count.
|
||||
fn regex_size_limit(&self) -> Result<Option<usize>> {
|
||||
let r = self.parse_human_readable_size_arg("regex-size-limit")?;
|
||||
human_readable_to_usize("regex-size-limit", r)
|
||||
}
|
||||
|
||||
/// Parses the max-filesize argument option into a byte count.
|
||||
fn max_filesize(&self) -> Result<Option<u64>> {
|
||||
self.parse_human_readable_size_arg("max-filesize")
|
||||
}
|
||||
|
||||
/// Returns true if ignore files should be ignored.
|
||||
fn no_ignore(&self) -> bool {
|
||||
self.is_present("no-ignore")
|
||||
@@ -883,6 +1013,24 @@ impl<'a> ArgMatches<'a> {
|
||||
self.values_of_lossy(name).unwrap_or_else(Vec::new)
|
||||
}
|
||||
|
||||
/// Safely reads an arg value with the given name, and if it's present,
|
||||
/// tries to parse it as a usize value.
|
||||
///
|
||||
/// If the number is zero, then it is considered absent and `None` is
|
||||
/// returned.
|
||||
fn usize_of_nonzero(&self, name: &str) -> Result<Option<usize>> {
|
||||
match self.value_of_lossy(name) {
|
||||
None => Ok(None),
|
||||
Some(v) => v.parse().map_err(From::from).map(|n| {
|
||||
if n == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(n)
|
||||
}
|
||||
}),
|
||||
}
|
||||
}
|
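// Behavior sketch for the helper above (helper name here is hypothetical):
// a parsed value of zero is treated as "flag absent", which is how a flag such
// as --max-columns=0 disables the limit entirely.
fn nonzero(parsed: usize) -> Option<usize> {
    if parsed == 0 { None } else { Some(parsed) }
}

fn main() {
    assert_eq!(nonzero(0), None);        // e.g. --max-columns=0 -> no limit
    assert_eq!(nonzero(150), Some(150)); // any other value is kept
}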
||||
|
||||
/// Safely reads an arg value with the given name, and if it's present,
|
||||
/// tries to parse it as a usize value.
|
||||
fn usize_of(&self, name: &str) -> Result<Option<usize>> {
|
||||
@@ -891,6 +1039,35 @@ impl<'a> ArgMatches<'a> {
|
||||
Some(v) => v.parse().map(Some).map_err(From::from),
|
||||
}
|
||||
}
|
||||
|
||||
// The following methods mostly dispatch to the underlying clap methods
|
||||
// directly. Methods that would otherwise get a single value will fetch
|
||||
// all values and return the last one. (Clap returns the first one.) We
|
||||
// only define the ones we need.
|
||||
|
||||
fn is_present(&self, name: &str) -> bool {
|
||||
self.0.is_present(name)
|
||||
}
|
||||
|
||||
fn occurrences_of(&self, name: &str) -> u64 {
|
||||
self.0.occurrences_of(name)
|
||||
}
|
||||
|
||||
fn value_of_lossy(&self, name: &str) -> Option<String> {
|
||||
self.0.value_of_lossy(name).map(|s| s.into_owned())
|
||||
}
|
||||
|
||||
fn values_of_lossy(&self, name: &str) -> Option<Vec<String>> {
|
||||
self.0.values_of_lossy(name)
|
||||
}
|
||||
|
||||
fn value_of_os(&'a self, name: &str) -> Option<&'a OsStr> {
|
||||
self.0.value_of_os(name)
|
||||
}
|
||||
|
||||
fn values_of_os(&'a self, name: &str) -> Option<clap::OsValues<'a>> {
|
||||
self.0.values_of_os(name)
|
||||
}
|
||||
}
|
||||
|
||||
fn pattern_to_str(s: &OsStr) -> Result<&str> {
|
||||
@@ -943,6 +1120,27 @@ impl QuietMatched {
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert the result of a `parse_human_readable_size_arg` call into
|
||||
/// a `usize`, failing if the type does not fit.
|
||||
fn human_readable_to_usize(
|
||||
arg_name: &str,
|
||||
value: Option<u64>,
|
||||
) -> Result<Option<usize>> {
|
||||
use std::usize;
|
||||
|
||||
match value {
|
||||
None => Ok(None),
|
||||
Some(v) => {
|
||||
if v <= usize::MAX as u64 {
|
||||
Ok(Some(v as usize))
|
||||
} else {
|
||||
let msg = format!("number too large for {}", arg_name);
|
||||
Err(From::from(msg))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if stdin is deemed searchable.
|
||||
#[cfg(unix)]
|
||||
fn stdin_is_readable() -> bool {
|
||||
@@ -963,3 +1161,44 @@ fn stdin_is_readable() -> bool {
|
||||
// always return true.
|
||||
true
|
||||
}
|
||||
|
||||
/// Returns true if and only if this path points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn path_is_dir(path: &Path) -> bool {
|
||||
fs::metadata(path).map(|md| metadata_is_dir(&md)).unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Returns true if and only if this path points to a directory.
|
||||
#[cfg(not(windows))]
|
||||
fn path_is_dir(path: &Path) -> bool {
|
||||
path.is_dir()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this path points to a file.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn path_is_file(path: &Path) -> bool {
|
||||
!path_is_dir(path)
|
||||
}
|
||||
|
||||
/// Returns true if and only if this path points to a file.
|
||||
#[cfg(not(windows))]
|
||||
fn path_is_file(path: &Path) -> bool {
|
||||
path.is_file()
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given metadata points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn metadata_is_dir(md: &fs::Metadata) -> bool {
|
||||
use std::os::windows::fs::MetadataExt;
|
||||
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
|
||||
md.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0
|
||||
}
|
||||
|
||||
195 src/config.rs Normal file
@@ -0,0 +1,195 @@
|
||||
// This module provides routines for reading ripgrep config "rc" files. The
|
||||
// primary output of these routines is a sequence of arguments, where each
|
||||
// argument corresponds precisely to one shell argument.
|
||||
|
||||
use std::env;
|
||||
use std::error::Error;
|
||||
use std::fs::File;
|
||||
use std::io::{self, BufRead};
|
||||
use std::ffi::OsString;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use Result;
|
||||
|
||||
/// Return a sequence of arguments derived from ripgrep rc configuration files.
|
||||
///
|
||||
/// If no_messages is false and there was a problem reading a config file,
|
||||
/// then errors are printed to stderr.
|
||||
pub fn args(no_messages: bool) -> Vec<OsString> {
|
||||
let config_path = match env::var_os("RIPGREP_CONFIG_PATH") {
|
||||
None => return vec![],
|
||||
Some(config_path) => {
|
||||
if config_path.is_empty() {
|
||||
return vec![];
|
||||
}
|
||||
PathBuf::from(config_path)
|
||||
}
|
||||
};
|
||||
let (args, errs) = match parse(&config_path) {
|
||||
Ok((args, errs)) => (args, errs),
|
||||
Err(err) => {
|
||||
if !no_messages {
|
||||
eprintln!("{}", err);
|
||||
}
|
||||
return vec![];
|
||||
}
|
||||
};
|
||||
if !no_messages && !errs.is_empty() {
|
||||
for err in errs {
|
||||
eprintln!("{}:{}", config_path.display(), err);
|
||||
}
|
||||
}
|
||||
debug!(
|
||||
"{}: arguments loaded from config file: {:?}",
|
||||
config_path.display(), args);
|
||||
args
|
||||
}
|
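// A hedged usage sketch of `args` above (relying on this module's existing
// imports): the config path is read only from the RIPGREP_CONFIG_PATH
// environment variable, so with the variable unset the function returns an
// empty vector, and the file path shown below is purely illustrative.
fn demo() {
    env::remove_var("RIPGREP_CONFIG_PATH");
    assert!(args(false).is_empty());

    env::set_var("RIPGREP_CONFIG_PATH", "/home/user/.ripgreprc");
    // Passing `true` suppresses error messages if the file is unreadable.
    let extra = args(true);
    println!("{} argument(s) loaded from config", extra.len());
}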
||||
|
||||
/// Parse a single ripgrep rc file from the given path.
|
||||
///
|
||||
/// On success, this returns a set of shell arguments, in order, that should
|
||||
/// be pre-pended to the arguments given to ripgrep at the command line.
|
||||
///
|
||||
/// If the file could not be read, then an error is returned. If there was
|
||||
/// a problem parsing one or more lines in the file, then errors are returned
|
||||
/// for each line in addition to successfully parsed arguments.
|
||||
fn parse<P: AsRef<Path>>(
|
||||
path: P,
|
||||
) -> Result<(Vec<OsString>, Vec<Box<Error>>)> {
|
||||
let path = path.as_ref();
|
||||
match File::open(&path) {
|
||||
Ok(file) => parse_reader(file),
|
||||
Err(err) => errored!("{}: {}", path.display(), err),
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a single ripgrep rc file from the given reader.
|
||||
///
|
||||
/// Callers should not provide a buffered reader, as this routine will use its
|
||||
/// own buffer internally.
|
||||
///
|
||||
/// On success, this returns a set of shell arguments, in order, that should
|
||||
/// be pre-pended to the arguments given to ripgrep at the command line.
|
||||
///
|
||||
/// If the reader could not be read, then an error is returned. If there was a
|
||||
/// problem parsing one or more lines, then errors are returned for each line
|
||||
/// in addition to successfully parsed arguments.
|
||||
fn parse_reader<R: io::Read>(
|
||||
rdr: R,
|
||||
) -> Result<(Vec<OsString>, Vec<Box<Error>>)> {
|
||||
let mut bufrdr = io::BufReader::new(rdr);
|
||||
let (mut args, mut errs) = (vec![], vec![]);
|
||||
let mut line = vec![];
|
||||
let mut line_number = 0;
|
||||
while {
|
||||
line.clear();
|
||||
line_number += 1;
|
||||
bufrdr.read_until(b'\n', &mut line)? > 0
|
||||
} {
|
||||
trim(&mut line);
|
||||
if line.is_empty() || line[0] == b'#' {
|
||||
continue;
|
||||
}
|
||||
match bytes_to_os_string(&line) {
|
||||
Ok(osstr) => {
|
||||
args.push(osstr);
|
||||
}
|
||||
Err(err) => {
|
||||
errs.push(format!("{}: {}", line_number, err).into());
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok((args, errs))
|
||||
}
|
||||
|
||||
/// Trim the given bytes of whitespace according to the ASCII definition.
|
||||
fn trim(x: &mut Vec<u8>) {
|
||||
let upto = x.iter().take_while(|b| is_space(**b)).count();
|
||||
x.drain(..upto);
|
||||
let revto = x.len() - x.iter().rev().take_while(|b| is_space(**b)).count();
|
||||
x.drain(revto..);
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given byte is an ASCII space character.
|
||||
fn is_space(b: u8) -> bool {
|
||||
b == b'\t'
|
||||
|| b == b'\n'
|
||||
|| b == b'\x0B'
|
||||
|| b == b'\x0C'
|
||||
|| b == b'\r'
|
||||
|| b == b' '
|
||||
}
|
||||
|
||||
/// On Unix, get an OsString from raw bytes.
|
||||
#[cfg(unix)]
|
||||
fn bytes_to_os_string(bytes: &[u8]) -> Result<OsString> {
|
||||
use std::os::unix::ffi::OsStringExt;
|
||||
Ok(OsString::from_vec(bytes.to_vec()))
|
||||
}
|
||||
|
||||
/// On non-Unix (like Windows), require UTF-8.
|
||||
#[cfg(not(unix))]
|
||||
fn bytes_to_os_string(bytes: &[u8]) -> Result<OsString> {
|
||||
String::from_utf8(bytes.to_vec()).map(OsString::from).map_err(From::from)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ffi::OsString;
|
||||
use super::parse_reader;
|
||||
|
||||
#[test]
|
||||
fn basic() {
|
||||
let (args, errs) = parse_reader(&b"\
|
||||
# Test
|
||||
--context=0
|
||||
--smart-case
|
||||
-u
|
||||
|
||||
|
||||
# --bar
|
||||
--foo
|
||||
"[..]).unwrap();
|
||||
assert!(errs.is_empty());
|
||||
let args: Vec<String> =
|
||||
args.into_iter().map(|s| s.into_string().unwrap()).collect();
|
||||
assert_eq!(args, vec![
|
||||
"--context=0", "--smart-case", "-u", "--foo",
|
||||
]);
|
||||
}
|
||||
|
||||
// We test that we can handle invalid UTF-8 on Unix-like systems.
|
||||
#[test]
|
||||
#[cfg(unix)]
|
||||
fn error() {
|
||||
use std::os::unix::ffi::OsStringExt;
|
||||
|
||||
let (args, errs) = parse_reader(&b"\
|
||||
quux
|
||||
foo\xFFbar
|
||||
baz
|
||||
"[..]).unwrap();
|
||||
assert!(errs.is_empty());
|
||||
assert_eq!(args, vec![
|
||||
OsString::from("quux"),
|
||||
OsString::from_vec(b"foo\xFFbar".to_vec()),
|
||||
OsString::from("baz"),
|
||||
]);
|
||||
}
|
||||
|
||||
// ... but test that invalid UTF-8 fails on Windows.
|
||||
#[test]
|
||||
#[cfg(not(unix))]
|
||||
fn error() {
|
||||
let (args, errs) = parse_reader(&b"\
|
||||
quux
|
||||
foo\xFFbar
|
||||
baz
|
||||
"[..]).unwrap();
|
||||
assert_eq!(errs.len(), 1);
|
||||
assert_eq!(args, vec![
|
||||
OsString::from("quux"),
|
||||
OsString::from("baz"),
|
||||
]);
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,3 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::cmp;
|
||||
use std::io::{self, Read};
|
||||
|
||||
@@ -34,7 +32,7 @@ impl Bom {
|
||||
}
|
||||
}
|
||||
|
||||
/// BomPeeker wraps `R` and satisfies the `io::Read` interface while also
|
||||
/// `BomPeeker` wraps `R` and satisfies the `io::Read` interface while also
|
||||
/// providing a peek at the BOM if one exists. Peeking at the BOM does not
|
||||
/// advance the reader.
|
||||
struct BomPeeker<R> {
|
||||
@@ -64,7 +62,7 @@ impl<R: io::Read> BomPeeker<R> {
|
||||
}
|
||||
self.bom = Some(Bom { bytes: [0; 3], len: 0 });
|
||||
let mut buf = [0u8; 3];
|
||||
let bom_len = try!(read_full(&mut self.rdr, &mut buf));
|
||||
let bom_len = read_full(&mut self.rdr, &mut buf)?;
|
||||
self.bom = Some(Bom { bytes: buf, len: bom_len });
|
||||
Ok(self.bom.unwrap())
|
||||
}
|
||||
@@ -73,7 +71,7 @@ impl<R: io::Read> BomPeeker<R> {
|
||||
impl<R: io::Read> io::Read for BomPeeker<R> {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
if self.nread < 3 {
|
||||
let bom = try!(self.peek_bom());
|
||||
let bom = self.peek_bom()?;
|
||||
let bom = bom.as_slice();
|
||||
if self.nread < bom.len() {
|
||||
let rest = &bom[self.nread..];
|
||||
@@ -83,13 +81,13 @@ impl<R: io::Read> io::Read for BomPeeker<R> {
|
||||
return Ok(len);
|
||||
}
|
||||
}
|
||||
let nread = try!(self.rdr.read(buf));
|
||||
let nread = self.rdr.read(buf)?;
|
||||
self.nread += nread;
|
||||
Ok(nread)
|
||||
}
|
||||
}
|
||||
|
||||
/// Like io::Read::read_exact, except it never returns UnexpectedEof and
|
||||
/// Like `io::Read::read_exact`, except it never returns `UnexpectedEof` and
|
||||
/// instead returns the number of bytes read if EOF is seen before filling
|
||||
/// `buf`.
|
||||
fn read_full<R: io::Read>(
|
||||
@@ -114,12 +112,12 @@ fn read_full<R: io::Read>(
|
||||
|
||||
/// A reader that transcodes to UTF-8. The source encoding is determined by
|
||||
/// inspecting the BOM from the stream read from `R`, if one exists. If a
|
||||
/// UTF-16 BOM exists, then the source stream is trancoded to UTF-8 with
|
||||
/// UTF-16 BOM exists, then the source stream is transcoded to UTF-8 with
|
||||
/// invalid UTF-16 sequences translated to the Unicode replacement character.
|
||||
/// In all other cases, the underlying reader is passed through unchanged.
|
||||
///
|
||||
/// `R` is the type of the underlying reader and `B` is the type of an internal
|
||||
/// buffer used to store the results of trancoding.
|
||||
/// buffer used to store the results of transcoding.
|
||||
///
|
||||
/// Note that not all methods on `io::Read` work with this implementation.
|
||||
/// For example, the `bytes` adapter method attempts to read a single byte at
|
||||
@@ -198,7 +196,7 @@ impl<R: io::Read, B: AsMut<[u8]>> DecodeReader<R, B> {
|
||||
}
|
||||
self.pos = 0;
|
||||
self.buflen +=
|
||||
try!(self.rdr.read(&mut self.buf.as_mut()[self.buflen..]));
|
||||
self.rdr.read(&mut self.buf.as_mut()[self.buflen..])?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -221,7 +219,7 @@ impl<R: io::Read, B: AsMut<[u8]>> DecodeReader<R, B> {
|
||||
return Ok(0);
|
||||
}
|
||||
if self.pos >= self.buflen {
|
||||
try!(self.fill());
|
||||
self.fill()?;
|
||||
}
|
||||
let mut nwrite = 0;
|
||||
loop {
|
||||
@@ -237,7 +235,7 @@ impl<R: io::Read, B: AsMut<[u8]>> DecodeReader<R, B> {
|
||||
}
|
||||
// Otherwise, we know that our internal buffer has insufficient
|
||||
// data to transcode at least one char, so we attempt to refill it.
|
||||
try!(self.fill());
|
||||
self.fill()?;
|
||||
// Quit on EOF.
|
||||
if self.buflen == 0 {
|
||||
self.pos = 0;
|
||||
@@ -253,7 +251,7 @@ impl<R: io::Read, B: AsMut<[u8]>> DecodeReader<R, B> {
|
||||
|
||||
#[inline(never)] // impacts perf...
|
||||
fn detect(&mut self) -> io::Result<()> {
|
||||
let bom = try!(self.rdr.peek_bom());
|
||||
let bom = self.rdr.peek_bom()?;
|
||||
self.decoder = bom.decoder();
|
||||
Ok(())
|
||||
}
|
||||
@@ -263,7 +261,7 @@ impl<R: io::Read, B: AsMut<[u8]>> io::Read for DecodeReader<R, B> {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
if self.first {
|
||||
self.first = false;
|
||||
try!(self.detect());
|
||||
self.detect()?;
|
||||
}
|
||||
if self.decoder.is_none() {
|
||||
return self.rdr.read(buf);
|
||||
@@ -290,10 +288,6 @@ mod tests {
|
||||
|
||||
use super::{Bom, BomPeeker, DecodeReader};
|
||||
|
||||
fn utf8(bytes: &[u8]) -> &str {
|
||||
::std::str::from_utf8(bytes).unwrap()
|
||||
}
|
||||
|
||||
fn read_to_string<R: Read>(mut rdr: R) -> String {
|
||||
let mut s = String::new();
|
||||
rdr.read_to_string(&mut s).unwrap();
|
||||
@@ -453,7 +447,8 @@ mod tests {
|
||||
test_trans_simple!(trans_simple_utf16be, "utf-16be", b"\x04\x16", "Ж");
|
||||
test_trans_simple!(trans_simple_chinese, "chinese", b"\xA7\xA8", "Ж");
|
||||
test_trans_simple!(trans_simple_korean, "korean", b"\xAC\xA8", "Ж");
|
||||
test_trans_simple!(trans_simple_big5_hkscs, "big5-hkscs", b"\xC7\xFA", "Ж");
|
||||
test_trans_simple!(
|
||||
trans_simple_big5_hkscs, "big5-hkscs", b"\xC7\xFA", "Ж");
|
||||
test_trans_simple!(trans_simple_gbk, "gbk", b"\xA7\xA8", "Ж");
|
||||
test_trans_simple!(trans_simple_sjis, "sjis", b"\x84\x47", "Ж");
|
||||
test_trans_simple!(trans_simple_eucjp, "euc-jp", b"\xA7\xA8", "Ж");
|
||||
|
||||
191 src/decompressor.rs Normal file
@@ -0,0 +1,191 @@
|
||||
use std::collections::HashMap;
|
||||
use std::ffi::OsStr;
|
||||
use std::fmt;
|
||||
use std::io::{self, Read};
|
||||
use std::path::Path;
|
||||
use std::process::{self, Stdio};
|
||||
|
||||
use globset::{Glob, GlobSet, GlobSetBuilder};
|
||||
|
||||
/// A decompression command, containing the command to be spawned as well as any
|
||||
/// necessary CLI args.
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
struct DecompressionCommand {
|
||||
cmd: &'static str,
|
||||
args: &'static [&'static str],
|
||||
}
|
||||
|
||||
impl DecompressionCommand {
|
||||
/// Create a new decompression command.
|
||||
fn new(
|
||||
cmd: &'static str,
|
||||
args: &'static [&'static str],
|
||||
) -> DecompressionCommand {
|
||||
DecompressionCommand {
|
||||
cmd, args
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for DecompressionCommand {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{} {}", self.cmd, self.args.join(" "))
|
||||
}
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref DECOMPRESSION_COMMANDS: HashMap<
|
||||
&'static str,
|
||||
DecompressionCommand,
|
||||
> = {
|
||||
let mut m = HashMap::new();
|
||||
|
||||
const ARGS: &[&str] = &["-d", "-c"];
|
||||
m.insert("gz", DecompressionCommand::new("gzip", ARGS));
|
||||
m.insert("bz2", DecompressionCommand::new("bzip2", ARGS));
|
||||
m.insert("xz", DecompressionCommand::new("xz", ARGS));
|
||||
|
||||
const LZMA_ARGS: &[&str] = &["--format=lzma", "-d", "-c"];
|
||||
m.insert("lzma", DecompressionCommand::new("xz", LZMA_ARGS));
|
||||
|
||||
m
|
||||
};
|
||||
static ref SUPPORTED_COMPRESSION_FORMATS: GlobSet = {
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
builder.add(Glob::new("*.gz").unwrap());
|
||||
builder.add(Glob::new("*.bz2").unwrap());
|
||||
builder.add(Glob::new("*.xz").unwrap());
|
||||
builder.add(Glob::new("*.lzma").unwrap());
|
||||
builder.build().unwrap()
|
||||
};
|
||||
static ref TAR_ARCHIVE_FORMATS: GlobSet = {
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
builder.add(Glob::new("*.tar.gz").unwrap());
|
||||
builder.add(Glob::new("*.tar.xz").unwrap());
|
||||
builder.add(Glob::new("*.tar.bz2").unwrap());
|
||||
builder.add(Glob::new("*.tgz").unwrap());
|
||||
builder.add(Glob::new("*.txz").unwrap());
|
||||
builder.add(Glob::new("*.tbz2").unwrap());
|
||||
builder.build().unwrap()
|
||||
};
|
||||
}
|
||||
|
||||
/// DecompressionReader provides an `io::Read` implementation for a limited
|
||||
/// set of compression formats.
|
||||
#[derive(Debug)]
|
||||
pub struct DecompressionReader {
|
||||
cmd: DecompressionCommand,
|
||||
child: process::Child,
|
||||
done: bool,
|
||||
}
|
||||
|
||||
impl DecompressionReader {
|
||||
/// Returns a handle to the stdout of the spawned decompression process for
|
||||
/// `path`, which can be directly searched in the worker. When the returned
|
||||
/// value is exhausted, the underlying process is reaped. If the underlying
|
||||
/// process fails, then its stderr is read and converted into a normal
|
||||
/// io::Error.
|
||||
///
|
||||
/// If there is any error in spawning the decompression command, then
|
||||
/// return `None`, after outputting any necessary debug or error messages.
|
||||
pub fn from_path(path: &Path) -> Option<DecompressionReader> {
|
||||
if is_tar_archive(path) {
|
||||
debug!("{}: skipping tar archive", path.display());
|
||||
return None;
|
||||
}
|
||||
let extension = match path.extension().and_then(OsStr::to_str) {
|
||||
Some(extension) => extension,
|
||||
None => {
|
||||
debug!(
|
||||
"{}: failed to get compresson extension", path.display());
|
||||
return None;
|
||||
}
|
||||
};
|
||||
let decompression_cmd = match DECOMPRESSION_COMMANDS.get(extension) {
|
||||
Some(cmd) => cmd,
|
||||
None => {
|
||||
debug!(
|
||||
"{}: failed to get decompression command", path.display());
|
||||
return None;
|
||||
}
|
||||
};
|
||||
let cmd = process::Command::new(decompression_cmd.cmd)
|
||||
.args(decompression_cmd.args)
|
||||
.arg(path)
|
||||
.stdout(Stdio::piped())
|
||||
.stderr(Stdio::piped())
|
||||
.spawn();
|
||||
let child = match cmd {
|
||||
Ok(process) => process,
|
||||
Err(_) => {
|
||||
debug!(
|
||||
"{}: decompression command '{}' not found",
|
||||
path.display(), decompression_cmd.cmd);
|
||||
return None;
|
||||
}
|
||||
};
|
||||
Some(DecompressionReader::new(*decompression_cmd, child))
|
||||
}
|
||||
|
||||
fn new(
|
||||
cmd: DecompressionCommand,
|
||||
child: process::Child,
|
||||
) -> DecompressionReader {
|
||||
DecompressionReader {
|
||||
cmd: cmd,
|
||||
child: child,
|
||||
done: false,
|
||||
}
|
||||
}
|
||||
|
||||
fn read_error(&mut self) -> io::Result<io::Error> {
|
||||
let mut errbytes = vec![];
|
||||
self.child.stderr.as_mut().unwrap().read_to_end(&mut errbytes)?;
|
||||
let errstr = String::from_utf8_lossy(&errbytes);
|
||||
let errstr = errstr.trim();
|
||||
|
||||
Ok(if errstr.is_empty() {
|
||||
let msg = format!("decompression command failed: '{}'", self.cmd);
|
||||
io::Error::new(io::ErrorKind::Other, msg)
|
||||
} else {
|
||||
let msg = format!(
|
||||
"decompression command '{}' failed: {}", self.cmd, errstr);
|
||||
io::Error::new(io::ErrorKind::Other, msg)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl io::Read for DecompressionReader {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
if self.done {
|
||||
return Ok(0);
|
||||
}
|
||||
let nread = self.child.stdout.as_mut().unwrap().read(buf)?;
|
||||
if nread == 0 {
|
||||
self.done = true;
|
||||
// Reap the child now that we're done reading.
|
||||
// If the command failed, report stderr as an error.
|
||||
if !self.child.wait()?.success() {
|
||||
return Err(self.read_error()?);
|
||||
}
|
||||
}
|
||||
Ok(nread)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if the given path contains a supported compression format or
|
||||
/// is a TAR archive.
|
||||
pub fn is_compressed(path: &Path) -> bool {
|
||||
is_supported_compression_format(path) || is_tar_archive(path)
|
||||
}
|
||||
|
||||
/// Returns true if the given path matches any one of the supported compression
|
||||
/// formats.
|
||||
fn is_supported_compression_format(path: &Path) -> bool {
|
||||
SUPPORTED_COMPRESSION_FORMATS.is_match(path)
|
||||
}
|
||||
|
||||
/// Returns true if the given path matches any of the known TAR file formats.
|
||||
fn is_tar_archive(path: &Path) -> bool {
|
||||
TAR_ARCHIVE_FORMATS.is_match(path)
|
||||
}
|
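// A hedged usage sketch of the reader above (helper name is hypothetical and
// this relies on the `Path`/`Read` imports at the top of this module): the
// worker is assumed to call `from_path` when --search-zip is in effect and
// then search the returned stream like any other reader. This only works when
// the matching decompression binary (e.g. gzip) is on PATH.
fn read_compressed_to_string(path: &Path) -> Option<String> {
    let mut rdr = match DecompressionReader::from_path(path) {
        None => return None,
        Some(rdr) => rdr,
    };
    let mut contents = String::new();
    // A failing child process surfaces here as a normal io::Error built from
    // its stderr output.
    if rdr.read_to_string(&mut contents).is_err() {
        return None;
    }
    Some(contents)
}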
||||
57 src/logger.rs Normal file
@@ -0,0 +1,57 @@
|
||||
// This module defines a super simple logger that works with the `log` crate.
|
||||
// We don't need anything fancy; just basic log levels and the ability to
|
||||
// print to stderr. We therefore avoid bringing in extra dependencies just
|
||||
// for this functionality.
|
||||
|
||||
use log::{self, Log};
|
||||
|
||||
/// The simplest possible logger that logs to stderr.
|
||||
///
|
||||
/// This logger does no filtering. Instead, it relies on the `log` crate's
|
||||
/// filtering via its global max_level setting.
|
||||
#[derive(Debug)]
|
||||
pub struct Logger(());
|
||||
|
||||
const LOGGER: &'static Logger = &Logger(());
|
||||
|
||||
impl Logger {
|
||||
/// Create a new logger that logs to stderr and initialize it as the
|
||||
/// global logger. If there was a problem setting the logger, then an
|
||||
/// error is returned.
|
||||
pub fn init() -> Result<(), log::SetLoggerError> {
|
||||
log::set_logger(LOGGER)
|
||||
}
|
||||
}
|
||||
|
||||
impl Log for Logger {
|
||||
fn enabled(&self, _: &log::Metadata) -> bool {
|
||||
// We set the log level via log::set_max_level, so we don't need to
|
||||
// implement filtering here.
|
||||
true
|
||||
}
|
||||
|
||||
fn log(&self, record: &log::Record) {
|
||||
match (record.file(), record.line()) {
|
||||
(Some(file), Some(line)) => {
|
||||
eprintln!(
|
||||
"{}/{}/{}:{}: {}",
|
||||
record.level(), record.target(),
|
||||
file, line, record.args());
|
||||
}
|
||||
(Some(file), None) => {
|
||||
eprintln!(
|
||||
"{}/{}/{}: {}",
|
||||
record.level(), record.target(), file, record.args());
|
||||
}
|
||||
_ => {
|
||||
eprintln!(
|
||||
"{}/{}: {}",
|
||||
record.level(), record.target(), record.args());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn flush(&self) {
|
||||
// We use eprintln! which is flushed on every call.
|
||||
}
|
||||
}
|
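// A minimal usage sketch (assuming the `log` 0.4 API used above; the helper
// name is hypothetical): the logger never filters on its own, so verbosity is
// controlled entirely through `log::set_max_level`, which is how main.rs
// reacts to --debug.
fn init_logging(debug: bool) -> Result<(), log::SetLoggerError> {
    Logger::init()?;
    log::set_max_level(if debug {
        log::LevelFilter::Debug
    } else {
        log::LevelFilter::Warn
    });
    Ok(())
}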
||||
184 src/main.rs
@@ -3,7 +3,7 @@ extern crate bytecount;
|
||||
#[macro_use]
|
||||
extern crate clap;
|
||||
extern crate encoding_rs;
|
||||
extern crate env_logger;
|
||||
extern crate globset;
|
||||
extern crate grep;
|
||||
extern crate ignore;
|
||||
#[macro_use]
|
||||
@@ -17,6 +17,8 @@ extern crate num_cpus;
|
||||
extern crate regex;
|
||||
extern crate same_file;
|
||||
extern crate termcolor;
|
||||
#[cfg(windows)]
|
||||
extern crate winapi;
|
||||
|
||||
use std::error::Error;
|
||||
use std::process;
|
||||
@@ -25,6 +27,7 @@ use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use args::Args;
|
||||
use worker::Work;
|
||||
@@ -35,16 +38,12 @@ macro_rules! errored {
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! eprintln {
|
||||
($($tt:tt)*) => {{
|
||||
use std::io::Write;
|
||||
let _ = writeln!(&mut ::std::io::stderr(), $($tt)*);
|
||||
}}
|
||||
}
|
||||
|
||||
mod app;
|
||||
mod args;
|
||||
mod config;
|
||||
mod decoder;
|
||||
mod decompressor;
|
||||
mod logger;
|
||||
mod pathutil;
|
||||
mod printer;
|
||||
mod search_buffer;
|
||||
@@ -52,9 +51,10 @@ mod search_stream;
|
||||
mod unescape;
|
||||
mod worker;
|
||||
|
||||
pub type Result<T> = result::Result<T, Box<Error + Send + Sync>>;
|
||||
pub type Result<T> = result::Result<T, Box<Error>>;
|
||||
|
||||
fn main() {
|
||||
reset_sigpipe();
|
||||
match Args::parse().map(Arc::new).and_then(run) {
|
||||
Ok(0) => process::exit(1),
|
||||
Ok(_) => process::exit(0),
|
||||
@@ -72,31 +72,34 @@ fn run(args: Arc<Args>) -> Result<u64> {
|
||||
let threads = args.threads();
|
||||
if args.files() {
|
||||
if threads == 1 || args.is_one_path() {
|
||||
run_files_one_thread(args)
|
||||
run_files_one_thread(&args)
|
||||
} else {
|
||||
run_files_parallel(args)
|
||||
}
|
||||
} else if args.type_list() {
|
||||
run_types(args)
|
||||
run_types(&args)
|
||||
} else if threads == 1 || args.is_one_path() {
|
||||
run_one_thread(args)
|
||||
run_one_thread(&args)
|
||||
} else {
|
||||
run_parallel(args)
|
||||
run_parallel(&args)
|
||||
}
|
||||
}
|
||||
|
||||
fn run_parallel(args: Arc<Args>) -> Result<u64> {
|
||||
fn run_parallel(args: &Arc<Args>) -> Result<u64> {
|
||||
let start_time = Instant::now();
|
||||
let bufwtr = Arc::new(args.buffer_writer());
|
||||
let quiet_matched = args.quiet_matched();
|
||||
let paths_searched = Arc::new(AtomicUsize::new(0));
|
||||
let match_count = Arc::new(AtomicUsize::new(0));
|
||||
let match_line_count = Arc::new(AtomicUsize::new(0));
|
||||
let paths_matched = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
args.walker_parallel().run(|| {
|
||||
let args = args.clone();
|
||||
let args = Arc::clone(args);
|
||||
let quiet_matched = quiet_matched.clone();
|
||||
let paths_searched = paths_searched.clone();
|
||||
let match_count = match_count.clone();
|
||||
let bufwtr = bufwtr.clone();
|
||||
let match_line_count = match_line_count.clone();
|
||||
let paths_matched = paths_matched.clone();
|
||||
let bufwtr = Arc::clone(&bufwtr);
|
||||
let mut buf = bufwtr.buffer();
|
||||
let mut worker = args.worker();
|
||||
Box::new(move |result| {
|
||||
@@ -108,6 +111,7 @@ fn run_parallel(args: Arc<Args>) -> Result<u64> {
|
||||
let dent = match get_or_log_dir_entry(
|
||||
result,
|
||||
args.stdout_handle(),
|
||||
args.files(),
|
||||
args.no_messages(),
|
||||
) {
|
||||
None => return Continue,
|
||||
@@ -125,10 +129,13 @@ fn run_parallel(args: Arc<Args>) -> Result<u64> {
|
||||
} else {
|
||||
worker.run(&mut printer, Work::DirEntry(dent))
|
||||
};
|
||||
match_count.fetch_add(count as usize, Ordering::SeqCst);
|
||||
match_line_count.fetch_add(count as usize, Ordering::SeqCst);
|
||||
if quiet_matched.set_match(count > 0) {
|
||||
return Quit;
|
||||
}
|
||||
if args.stats() && count > 0 {
|
||||
paths_matched.fetch_add(1, Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
// BUG(burntsushi): We should handle this error instead of ignoring
|
||||
// it. See: https://github.com/BurntSushi/ripgrep/issues/200
|
||||
@@ -141,26 +148,40 @@ fn run_parallel(args: Arc<Args>) -> Result<u64> {
|
||||
eprint_nothing_searched();
|
||||
}
|
||||
}
|
||||
Ok(match_count.load(Ordering::SeqCst) as u64)
|
||||
let match_line_count = match_line_count.load(Ordering::SeqCst) as u64;
|
||||
let paths_searched = paths_searched.load(Ordering::SeqCst) as u64;
|
||||
let paths_matched = paths_matched.load(Ordering::SeqCst) as u64;
|
||||
if args.stats() {
|
||||
print_stats(
|
||||
match_line_count,
|
||||
paths_searched,
|
||||
paths_matched,
|
||||
start_time.elapsed(),
|
||||
);
|
||||
}
|
||||
Ok(match_line_count)
|
||||
}
|
||||
|
||||
fn run_one_thread(args: Arc<Args>) -> Result<u64> {
|
||||
fn run_one_thread(args: &Arc<Args>) -> Result<u64> {
|
||||
let start_time = Instant::now();
|
||||
let stdout = args.stdout();
|
||||
let mut stdout = stdout.lock();
|
||||
let mut worker = args.worker();
|
||||
let mut paths_searched: u64 = 0;
|
||||
let mut match_count = 0;
|
||||
let mut match_line_count = 0;
|
||||
let mut paths_matched: u64 = 0;
|
||||
for result in args.walker() {
|
||||
let dent = match get_or_log_dir_entry(
|
||||
result,
|
||||
args.stdout_handle(),
|
||||
args.files(),
|
||||
args.no_messages(),
|
||||
) {
|
||||
None => continue,
|
||||
Some(dent) => dent,
|
||||
};
|
||||
let mut printer = args.printer(&mut stdout);
|
||||
if match_count > 0 {
|
||||
if match_line_count > 0 {
|
||||
if args.quiet() {
|
||||
break;
|
||||
}
|
||||
@@ -169,41 +190,56 @@ fn run_one_thread(args: Arc<Args>) -> Result<u64> {
|
||||
}
|
||||
}
|
||||
paths_searched += 1;
|
||||
match_count +=
|
||||
let count =
|
||||
if dent.is_stdin() {
|
||||
worker.run(&mut printer, Work::Stdin)
|
||||
} else {
|
||||
worker.run(&mut printer, Work::DirEntry(dent))
|
||||
};
|
||||
match_line_count += count;
|
||||
if args.stats() && count > 0 {
|
||||
paths_matched += 1;
|
||||
}
|
||||
}
|
||||
if !args.paths().is_empty() && paths_searched == 0 {
|
||||
if !args.no_messages() {
|
||||
eprint_nothing_searched();
|
||||
}
|
||||
}
|
||||
Ok(match_count)
|
||||
if args.stats() {
|
||||
print_stats(
|
||||
match_line_count,
|
||||
paths_searched,
|
||||
paths_matched,
|
||||
start_time.elapsed(),
|
||||
);
|
||||
}
|
||||
Ok(match_line_count)
|
||||
}
|
||||
|
||||
fn run_files_parallel(args: Arc<Args>) -> Result<u64> {
|
||||
let print_args = args.clone();
|
||||
let print_args = Arc::clone(&args);
|
||||
let (tx, rx) = mpsc::channel::<ignore::DirEntry>();
|
||||
let print_thread = thread::spawn(move || {
|
||||
let stdout = print_args.stdout();
|
||||
let mut printer = print_args.printer(stdout.lock());
|
||||
let mut file_count = 0;
|
||||
for dent in rx.iter() {
|
||||
if !print_args.quiet() {
|
||||
printer.path(dent.path());
|
||||
}
|
||||
file_count += 1;
|
||||
}
|
||||
file_count
|
||||
});
|
||||
args.walker_parallel().run(move || {
|
||||
let args = args.clone();
|
||||
let args = Arc::clone(&args);
|
||||
let tx = tx.clone();
|
||||
Box::new(move |result| {
|
||||
if let Some(dent) = get_or_log_dir_entry(
|
||||
result,
|
||||
args.stdout_handle(),
|
||||
args.files(),
|
||||
args.no_messages(),
|
||||
) {
|
||||
tx.send(dent).unwrap();
|
||||
@@ -214,7 +250,7 @@ fn run_files_parallel(args: Arc<Args>) -> Result<u64> {
|
||||
Ok(print_thread.join().unwrap())
|
||||
}
|
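// Generic sketch of the pattern used by run_files_parallel above: parallel
// walker callbacks send entries over an mpsc channel while a single dedicated
// thread owns stdout and does all the printing, so output never interleaves
// and the final count comes back through join(). Names are illustrative.
use std::sync::mpsc;
use std::thread;

fn main() {
    let (tx, rx) = mpsc::channel::<String>();
    let printer = thread::spawn(move || {
        let mut printed = 0u64;
        for path in rx {
            println!("{}", path);
            printed += 1;
        }
        printed
    });
    for p in &["a.rs", "b.rs", "c.rs"] {
        tx.send(p.to_string()).unwrap();
    }
    drop(tx); // closing the sending side lets the printer thread finish
    assert_eq!(printer.join().unwrap(), 3);
}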
||||
|
||||
fn run_files_one_thread(args: Arc<Args>) -> Result<u64> {
|
||||
fn run_files_one_thread(args: &Arc<Args>) -> Result<u64> {
|
||||
let stdout = args.stdout();
|
||||
let mut printer = args.printer(stdout.lock());
|
||||
let mut file_count = 0;
|
||||
@@ -222,18 +258,21 @@ fn run_files_one_thread(args: Arc<Args>) -> Result<u64> {
|
||||
let dent = match get_or_log_dir_entry(
|
||||
result,
|
||||
args.stdout_handle(),
|
||||
args.files(),
|
||||
args.no_messages(),
|
||||
) {
|
||||
None => continue,
|
||||
Some(dent) => dent,
|
||||
};
|
||||
if !args.quiet() {
|
||||
printer.path(dent.path());
|
||||
}
|
||||
file_count += 1;
|
||||
}
|
||||
Ok(file_count)
|
||||
}
|
||||
|
||||
fn run_types(args: Arc<Args>) -> Result<u64> {
|
||||
fn run_types(args: &Arc<Args>) -> Result<u64> {
|
||||
let stdout = args.stdout();
|
||||
let mut printer = args.printer(stdout.lock());
|
||||
let mut ty_count = 0;
|
||||
@@ -247,6 +286,7 @@ fn run_types(args: Arc<Args>) -> Result<u64> {
|
||||
fn get_or_log_dir_entry(
|
||||
result: result::Result<ignore::DirEntry, ignore::Error>,
|
||||
stdout_handle: Option<&same_file::Handle>,
|
||||
files_only: bool,
|
||||
no_messages: bool,
|
||||
) -> Option<ignore::DirEntry> {
|
||||
match result {
|
||||
@@ -262,20 +302,19 @@ fn get_or_log_dir_entry(
|
||||
eprintln!("{}", err);
|
||||
}
|
||||
}
|
||||
let ft = match dent.file_type() {
|
||||
None => return Some(dent), // entry is stdin
|
||||
Some(ft) => ft,
|
||||
};
|
||||
if dent.file_type().is_none() {
|
||||
return Some(dent); // entry is stdin
|
||||
}
|
||||
// A depth of 0 means the user gave the path explicitly, so we
|
||||
// should always try to search it.
|
||||
if dent.depth() == 0 && !ft.is_dir() {
|
||||
if dent.depth() == 0 && !ignore_entry_is_dir(&dent) {
|
||||
return Some(dent);
|
||||
} else if !ft.is_file() {
|
||||
} else if !ignore_entry_is_file(&dent) {
|
||||
return None;
|
||||
}
|
||||
// If we are redirecting stdout to a file, then don't search that
|
||||
// file.
|
||||
if is_stdout_file(&dent, stdout_handle, no_messages) {
|
||||
if !files_only && is_stdout_file(&dent, stdout_handle, no_messages) {
|
||||
return None;
|
||||
}
|
||||
Some(dent)
|
||||
@@ -283,6 +322,45 @@ fn get_or_log_dir_entry(
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given `ignore::DirEntry` points to a
|
||||
/// directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn ignore_entry_is_dir(dent: &ignore::DirEntry) -> bool {
|
||||
use std::os::windows::fs::MetadataExt;
|
||||
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
|
||||
|
||||
dent.metadata().map(|md| {
|
||||
md.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0
|
||||
}).unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given `ignore::DirEntry` points to a
|
||||
/// directory.
|
||||
#[cfg(not(windows))]
|
||||
fn ignore_entry_is_dir(dent: &ignore::DirEntry) -> bool {
|
||||
dent.file_type().map_or(false, |ft| ft.is_dir())
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given `ignore::DirEntry` points to a
|
||||
/// file.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn ignore_entry_is_file(dent: &ignore::DirEntry) -> bool {
|
||||
!ignore_entry_is_dir(dent)
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given `ignore::DirEntry` points to a
|
||||
/// file.
|
||||
#[cfg(not(windows))]
|
||||
fn ignore_entry_is_file(dent: &ignore::DirEntry) -> bool {
|
||||
dent.file_type().map_or(false, |ft| ft.is_file())
|
||||
}
|
||||
|
||||
fn is_stdout_file(
|
||||
dent: &ignore::DirEntry,
|
||||
stdout_handle: Option<&same_file::Handle>,
|
||||
@@ -326,3 +404,37 @@ fn eprint_nothing_searched() {
|
||||
applied a filter you didn't expect. \
|
||||
Try running again with --debug.");
|
||||
}
|
||||
|
||||
fn print_stats(
|
||||
match_count: u64,
|
||||
paths_searched: u64,
|
||||
paths_matched: u64,
|
||||
time_elapsed: Duration,
|
||||
) {
|
||||
let time_elapsed =
|
||||
time_elapsed.as_secs() as f64
|
||||
+ (time_elapsed.subsec_nanos() as f64 * 1e-9);
|
||||
println!("\n{} matched lines\n\
|
||||
{} files contained matches\n\
|
||||
{} files searched\n\
|
||||
{:.3} seconds", match_count, paths_matched,
|
||||
paths_searched, time_elapsed);
|
||||
}
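For reference, the conversion above turns the elapsed `Duration` into fractional seconds before the `{:.3}` format is applied; a small worked example with assumed values:

```rust
use std::time::Duration;

fn main() {
    // 1 second plus 250_000_000 nanoseconds of wall-clock time...
    let elapsed = Duration::new(1, 250_000_000);
    let secs = elapsed.as_secs() as f64
        + (elapsed.subsec_nanos() as f64 * 1e-9);
    // ...becomes 1.0 + 0.25 = 1.25 and is printed as "1.250 seconds".
    assert_eq!(format!("{:.3} seconds", secs), "1.250 seconds");
}
```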
|
||||
|
||||
// The Rust standard library suppresses the default SIGPIPE behavior, so that
|
||||
// writing to a closed pipe doesn't kill the process. The goal is to instead
|
||||
// handle errors through the normal result mechanism. Ripgrep needs some
|
||||
// refactoring before it will be able to do that, however, so we re-enable the
|
||||
// standard SIGPIPE behavior as a workaround. See
|
||||
// https://github.com/BurntSushi/ripgrep/issues/200.
|
||||
#[cfg(unix)]
|
||||
fn reset_sigpipe() {
|
||||
unsafe {
|
||||
libc::signal(libc::SIGPIPE, libc::SIG_DFL);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn reset_sigpipe() {
|
||||
// no-op
|
||||
}
|
||||
|
||||
350
src/printer.rs
@@ -3,29 +3,58 @@ use std::fmt;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
|
||||
use regex::bytes::{Regex, Replacer, Captures};
|
||||
use regex::bytes::{Captures, Match, Regex, Replacer};
|
||||
use termcolor::{Color, ColorSpec, ParseColorError, WriteColor};
|
||||
|
||||
use pathutil::strip_prefix;
|
||||
use ignore::types::FileTypeDef;
|
||||
|
||||
/// CountingReplacer implements the Replacer interface for Regex,
|
||||
/// Track the start and end of replacements to allow coloring them on output.
|
||||
#[derive(Debug)]
|
||||
struct Offset {
|
||||
start: usize,
|
||||
end: usize,
|
||||
}
|
||||
|
||||
impl Offset {
|
||||
fn new(start: usize, end: usize) -> Offset {
|
||||
Offset { start: start, end: end }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'m, 'r> From<&'m Match<'r>> for Offset {
|
||||
fn from(m: &'m Match<'r>) -> Self {
|
||||
Offset{ start: m.start(), end: m.end() }
|
||||
}
|
||||
}
|
||||
|
||||
/// `CountingReplacer` implements the Replacer interface for Regex,
|
||||
/// and counts how often replacement is being performed.
|
||||
struct CountingReplacer<'r> {
|
||||
replace: &'r [u8],
|
||||
count: &'r mut usize,
|
||||
offsets: &'r mut Vec<Offset>,
|
||||
}
|
||||
|
||||
impl<'r> CountingReplacer<'r> {
|
||||
fn new(replace: &'r [u8], count: &'r mut usize) -> CountingReplacer<'r> {
|
||||
CountingReplacer { replace: replace, count: count }
|
||||
fn new(
|
||||
replace: &'r [u8],
|
||||
count: &'r mut usize,
|
||||
offsets: &'r mut Vec<Offset>,
|
||||
) -> CountingReplacer<'r> {
|
||||
CountingReplacer { replace: replace, count: count, offsets: offsets, }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'r> Replacer for CountingReplacer<'r> {
|
||||
fn replace_append(&mut self, caps: &Captures, dst: &mut Vec<u8>) {
|
||||
*self.count += 1;
|
||||
let start = dst.len();
|
||||
caps.expand(self.replace, dst);
|
||||
let end = dst.len();
|
||||
if start != end {
|
||||
self.offsets.push(Offset::new(start, end));
|
||||
}
|
||||
}
|
||||
}
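A usage sketch for the replacer above, written as it might appear in this module's tests (the regex and input bytes are made up; `count` and `offsets` are filled in as a side effect of `replace_all`):

```rust
use regex::bytes::Regex;

fn counting_replacer_demo() {
    let re = Regex::new(r"[0-9]+").unwrap();
    let mut count = 0;
    let mut offsets = Vec::new();
    let line = {
        let replacer = CountingReplacer::new(b"#", &mut count, &mut offsets);
        re.replace_all(b"a1 b22 c333", replacer)
    };
    // Three numeric runs were replaced, each with the non-empty "#", so
    // three offsets were recorded for coloring the replacements later.
    assert_eq!(count, 3);
    assert_eq!(offsets.len(), 3);
    assert_eq!(&*line, &b"a# b# c#"[..]);
}
```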
|
||||
|
||||
@@ -58,6 +87,8 @@ pub struct Printer<W> {
|
||||
/// Whether to print NUL bytes after a file path instead of new lines
|
||||
/// or `:`.
|
||||
null: bool,
|
||||
/// Print only the matched (non-empty) parts of a matching line
|
||||
only_matching: bool,
|
||||
/// A string to use as a replacement of each match in a matching line.
|
||||
replace: Option<Vec<u8>>,
|
||||
/// Whether to prefix each match with the corresponding file name.
|
||||
@@ -67,7 +98,11 @@ pub struct Printer<W> {
|
||||
/// The separator to use for file paths. If empty, this is ignored.
|
||||
path_separator: Option<u8>,
|
||||
/// Restrict lines to this many columns.
|
||||
max_columns: Option<usize>
|
||||
max_columns: Option<usize>,
|
||||
/// Width of line number displayed. If the number of digits in the
|
||||
/// line number is less than this, it is left padded with
|
||||
/// spaces.
|
||||
line_number_width: Option<usize>
|
||||
}
|
||||
|
||||
impl<W: WriteColor> Printer<W> {
|
||||
@@ -83,11 +118,13 @@ impl<W: WriteColor> Printer<W> {
|
||||
heading: false,
|
||||
line_per_match: false,
|
||||
null: false,
|
||||
only_matching: false,
|
||||
replace: None,
|
||||
with_filename: false,
|
||||
colors: ColorSpecs::default(),
|
||||
path_separator: None,
|
||||
max_columns: None,
|
||||
line_number_width: None
|
||||
}
|
||||
}
|
||||
|
||||
@@ -144,6 +181,12 @@ impl<W: WriteColor> Printer<W> {
|
||||
self
|
||||
}
|
||||
|
||||
/// Print only the matched (non-empty) parts of a matching line
|
||||
pub fn only_matching(mut self, yes: bool) -> Printer<W> {
|
||||
self.only_matching = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// A separator to use when printing file paths. When empty, use the
|
||||
/// default separator for the current platform. (/ on Unix, \ on Windows.)
|
||||
pub fn path_separator(mut self, sep: Option<u8>) -> Printer<W> {
|
||||
@@ -153,9 +196,6 @@ impl<W: WriteColor> Printer<W> {
|
||||
|
||||
/// Replace every match in each matching line with the replacement string
|
||||
/// given.
|
||||
///
|
||||
/// The replacement string syntax is documented here:
|
||||
/// https://doc.rust-lang.org/regex/regex/bytes/struct.Captures.html#method.expand
|
||||
pub fn replace(mut self, replacement: Vec<u8>) -> Printer<W> {
|
||||
self.replace = Some(replacement);
|
||||
self
|
||||
@@ -173,6 +213,12 @@ impl<W: WriteColor> Printer<W> {
|
||||
self
|
||||
}
|
||||
|
||||
/// Configure the width of the displayed line number
|
||||
pub fn line_number_width(mut self, line_number_width: Option<usize>) -> Printer<W> {
|
||||
self.line_number_width = line_number_width;
|
||||
self
|
||||
}
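A worked example of the padding rule described above, with an assumed width of 4: line number 7 is right-aligned before the separator is written.

```rust
fn main() {
    let width: usize = 4;
    let line_number = 7u64.to_string();
    // Right-align within `width` columns, padding the left with spaces.
    assert_eq!(format!("{:>width$}", line_number, width = width), "   7");
}
```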
|
||||
|
||||
/// Returns true if and only if something has been printed.
|
||||
pub fn has_printed(&self) -> bool {
|
||||
self.has_printed
|
||||
@@ -204,22 +250,14 @@ impl<W: WriteColor> Printer<W> {
|
||||
pub fn path<P: AsRef<Path>>(&mut self, path: P) {
|
||||
let path = strip_prefix("./", path.as_ref()).unwrap_or(path.as_ref());
|
||||
self.write_path(path);
|
||||
if self.null {
|
||||
self.write(b"\x00");
|
||||
} else {
|
||||
self.write_eol();
|
||||
}
|
||||
self.write_path_eol();
|
||||
}
|
||||
|
||||
/// Prints the given path and a count of the number of matches found.
|
||||
pub fn path_count<P: AsRef<Path>>(&mut self, path: P, count: u64) {
|
||||
if self.with_filename {
|
||||
self.write_path(path);
|
||||
if self.null {
|
||||
self.write(b"\x00");
|
||||
} else {
|
||||
self.write(b":");
|
||||
}
|
||||
self.write_path_sep(b':');
|
||||
}
|
||||
self.write(count.to_string().as_bytes());
|
||||
self.write_eol();
|
||||
@@ -227,13 +265,11 @@ impl<W: WriteColor> Printer<W> {
|
||||
|
||||
/// Prints the context separator.
|
||||
pub fn context_separate(&mut self) {
|
||||
// N.B. We can't use `write` here because of borrowing restrictions.
|
||||
if self.context_separator.is_empty() {
|
||||
return;
|
||||
}
|
||||
self.has_printed = true;
|
||||
let _ = self.wtr.write_all(&self.context_separator);
|
||||
let _ = self.wtr.write_all(&[self.eol]);
|
||||
self.write_eol();
|
||||
}
|
||||
|
||||
pub fn matched<P: AsRef<Path>>(
|
||||
@@ -244,27 +280,21 @@ impl<W: WriteColor> Printer<W> {
|
||||
start: usize,
|
||||
end: usize,
|
||||
line_number: Option<u64>,
|
||||
byte_offset: Option<u64>
|
||||
) {
|
||||
if !self.line_per_match {
|
||||
let column =
|
||||
if self.column {
|
||||
Some(re.find(&buf[start..end])
|
||||
.map(|m| m.start()).unwrap_or(0) as u64)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
if !self.line_per_match && !self.only_matching {
|
||||
let mat = re
|
||||
.find(&buf[start..end])
|
||||
.map(|m| (m.start(), m.end()))
|
||||
.unwrap_or((0, 0));
|
||||
return self.write_match(
|
||||
re, path, buf, start, end, line_number, column);
|
||||
re, path, buf, start, end, line_number,
|
||||
byte_offset, mat.0, mat.1);
|
||||
}
|
||||
for m in re.find_iter(&buf[start..end]) {
|
||||
let column =
|
||||
if self.column {
|
||||
Some(m.start() as u64)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
self.write_match(
|
||||
re, path.as_ref(), buf, start, end, line_number, column);
|
||||
re, path.as_ref(), buf, start, end, line_number,
|
||||
byte_offset, m.start(), m.end());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -276,67 +306,91 @@ impl<W: WriteColor> Printer<W> {
|
||||
start: usize,
|
||||
end: usize,
|
||||
line_number: Option<u64>,
|
||||
column: Option<u64>,
|
||||
byte_offset: Option<u64>,
|
||||
match_start: usize,
|
||||
match_end: usize,
|
||||
) {
|
||||
if self.heading && self.with_filename && !self.has_printed {
|
||||
self.write_file_sep();
|
||||
self.write_heading(path.as_ref());
|
||||
self.write_path(path);
|
||||
self.write_path_eol();
|
||||
} else if !self.heading && self.with_filename {
|
||||
self.write_non_heading_path(path.as_ref());
|
||||
self.write_path(path);
|
||||
self.write_path_sep(b':');
|
||||
}
|
||||
if let Some(line_number) = line_number {
|
||||
self.line_number(line_number, b':');
|
||||
}
|
||||
if let Some(c) = column {
|
||||
self.write((c + 1).to_string().as_bytes());
|
||||
self.write(b":");
|
||||
if self.column {
|
||||
self.column_number(match_start as u64 + 1, b':');
|
||||
}
|
||||
if let Some(byte_offset) = byte_offset {
|
||||
if self.only_matching {
|
||||
self.write_byte_offset(
|
||||
byte_offset + ((start + match_start) as u64), b':');
|
||||
} else {
|
||||
self.write_byte_offset(byte_offset + (start as u64), b':');
|
||||
}
|
||||
}
|
||||
if self.replace.is_some() {
|
||||
let mut count = 0;
|
||||
let mut offsets = Vec::new();
|
||||
let line = {
|
||||
let replacer = CountingReplacer::new(
|
||||
self.replace.as_ref().unwrap(), &mut count);
|
||||
self.replace.as_ref().unwrap(), &mut count, &mut offsets);
|
||||
if self.only_matching {
|
||||
re.replace_all(
|
||||
&buf[start + match_start..start + match_end], replacer)
|
||||
} else {
|
||||
re.replace_all(&buf[start..end], replacer)
|
||||
}
|
||||
};
|
||||
if self.max_columns.map_or(false, |m| line.len() > m) {
|
||||
let _ = self.wtr.set_color(self.colors.matched());
|
||||
let msg = format!(
|
||||
"[Omitted long line with {} replacements]", count);
|
||||
self.write(msg.as_bytes());
|
||||
let _ = self.wtr.reset();
|
||||
self.write_colored(msg.as_bytes(), |colors| colors.matched());
|
||||
self.write_eol();
|
||||
return;
|
||||
}
|
||||
self.write(&line);
|
||||
if line.last() != Some(&self.eol) {
|
||||
self.write_eol();
|
||||
}
|
||||
self.write_matched_line(offsets, &*line, false);
|
||||
} else {
|
||||
self.write_matched_line(re, &buf[start..end]);
|
||||
// write_matched_line guarantees to write a newline.
|
||||
let buf = if self.only_matching {
|
||||
&buf[start + match_start..start + match_end]
|
||||
} else {
|
||||
&buf[start..end]
|
||||
};
|
||||
if self.max_columns.map_or(false, |m| buf.len() > m) {
|
||||
let count = re.find_iter(buf).count();
|
||||
let msg = format!("[Omitted long line with {} matches]", count);
|
||||
self.write_colored(msg.as_bytes(), |colors| colors.matched());
|
||||
self.write_eol();
|
||||
return;
|
||||
}
|
||||
let only_match = self.only_matching;
|
||||
self.write_matched_line(
|
||||
re.find_iter(buf).map(|x| Offset::from(&x)), buf, only_match);
|
||||
}
|
||||
}
|
||||
|
||||
fn write_matched_line(&mut self, re: &Regex, buf: &[u8]) {
|
||||
if self.max_columns.map_or(false, |m| buf.len() > m) {
|
||||
let count = re.find_iter(buf).count();
|
||||
let _ = self.wtr.set_color(self.colors.matched());
|
||||
let msg = format!("[Omitted long line with {} matches]", count);
|
||||
self.write(msg.as_bytes());
|
||||
let _ = self.wtr.reset();
|
||||
self.write_eol();
|
||||
return;
|
||||
}
|
||||
fn write_matched_line<I>(&mut self, offsets: I, buf: &[u8], only_match: bool)
|
||||
where I: IntoIterator<Item=Offset>,
|
||||
{
|
||||
if !self.wtr.supports_color() || self.colors.matched().is_none() {
|
||||
self.write(buf);
|
||||
} else if only_match {
|
||||
self.write_colored(buf, |colors| colors.matched());
|
||||
} else {
|
||||
let mut last_written = 0;
|
||||
for m in re.find_iter(buf) {
|
||||
self.write(&buf[last_written..m.start()]);
|
||||
let _ = self.wtr.set_color(self.colors.matched());
|
||||
self.write(&buf[m.start()..m.end()]);
|
||||
let _ = self.wtr.reset();
|
||||
last_written = m.end();
|
||||
for o in offsets {
|
||||
self.write(&buf[last_written..o.start]);
|
||||
// This conditional checks if the match is both empty *and*
|
||||
// past the end of the line. In this case, we never want to
|
||||
// emit an additional color escape.
|
||||
if o.start != o.end || o.end != buf.len() {
|
||||
self.write_colored(
|
||||
&buf[o.start..o.end], |colors| colors.matched());
|
||||
}
|
||||
last_written = o.end;
|
||||
}
|
||||
self.write(&buf[last_written..]);
|
||||
}
|
||||
@@ -352,23 +406,24 @@ impl<W: WriteColor> Printer<W> {
|
||||
start: usize,
|
||||
end: usize,
|
||||
line_number: Option<u64>,
|
||||
byte_offset: Option<u64>,
|
||||
) {
|
||||
if self.heading && self.with_filename && !self.has_printed {
|
||||
self.write_file_sep();
|
||||
self.write_heading(path.as_ref());
|
||||
self.write_path(path);
|
||||
self.write_path_eol();
|
||||
} else if !self.heading && self.with_filename {
|
||||
self.write_path(path.as_ref());
|
||||
if self.null {
|
||||
self.write(b"\x00");
|
||||
} else {
|
||||
self.write(b"-");
|
||||
}
|
||||
self.write_path(path);
|
||||
self.write_path_sep(b'-');
|
||||
}
|
||||
if let Some(line_number) = line_number {
|
||||
self.line_number(line_number, b'-');
|
||||
}
|
||||
if let Some(byte_offset) = byte_offset {
|
||||
self.write_byte_offset(byte_offset + (start as u64), b'-');
|
||||
}
|
||||
if self.max_columns.map_or(false, |m| end - start > m) {
|
||||
self.write(format!("[Omitted long context line]").as_bytes());
|
||||
self.write(b"[Omitted long context line]");
|
||||
self.write_eol();
|
||||
return;
|
||||
}
|
||||
@@ -378,10 +433,19 @@ impl<W: WriteColor> Printer<W> {
|
||||
}
|
||||
}
|
||||
|
||||
fn write_heading<P: AsRef<Path>>(&mut self, path: P) {
|
||||
let _ = self.wtr.set_color(self.colors.path());
|
||||
self.write_path(path.as_ref());
|
||||
let _ = self.wtr.reset();
|
||||
fn separator(&mut self, sep: &[u8]) {
|
||||
self.write(sep);
|
||||
}
|
||||
|
||||
fn write_path_sep(&mut self, sep: u8) {
|
||||
if self.null {
|
||||
self.write(b"\x00");
|
||||
} else {
|
||||
self.separator(&[sep]);
|
||||
}
|
||||
}
|
||||
|
||||
fn write_path_eol(&mut self) {
|
||||
if self.null {
|
||||
self.write(b"\x00");
|
||||
} else {
|
||||
@@ -389,52 +453,52 @@ impl<W: WriteColor> Printer<W> {
|
||||
}
|
||||
}
|
||||
|
||||
fn write_non_heading_path<P: AsRef<Path>>(&mut self, path: P) {
|
||||
let _ = self.wtr.set_color(self.colors.path());
|
||||
self.write_path(path.as_ref());
|
||||
let _ = self.wtr.reset();
|
||||
if self.null {
|
||||
self.write(b"\x00");
|
||||
} else {
|
||||
self.write(b":");
|
||||
}
|
||||
}
|
||||
|
||||
fn line_number(&mut self, n: u64, sep: u8) {
|
||||
let _ = self.wtr.set_color(self.colors.line());
|
||||
self.write(n.to_string().as_bytes());
|
||||
let _ = self.wtr.reset();
|
||||
self.write(&[sep]);
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn write_path<P: AsRef<Path>>(&mut self, path: P) {
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
let path = path.as_ref().as_os_str().as_bytes();
|
||||
match self.path_separator {
|
||||
None => self.write(path),
|
||||
Some(sep) => self.write_path_with_sep(path, sep),
|
||||
}
|
||||
self.write_path_replace_separator(path);
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn write_path<P: AsRef<Path>>(&mut self, path: P) {
|
||||
let path = path.as_ref().to_string_lossy();
|
||||
self.write_path_replace_separator(path.as_bytes());
|
||||
}
|
||||
|
||||
fn write_path_replace_separator(&mut self, path: &[u8]) {
|
||||
match self.path_separator {
|
||||
None => self.write(path.as_bytes()),
|
||||
Some(sep) => self.write_path_with_sep(path.as_bytes(), sep),
|
||||
None => self.write_colored(path, |colors| colors.path()),
|
||||
Some(sep) => {
|
||||
let transformed_path: Vec<_> = path.iter().map(|&b| {
|
||||
if b == b'/' || (cfg!(windows) && b == b'\\') {
|
||||
sep
|
||||
} else {
|
||||
b
|
||||
}
|
||||
}).collect();
|
||||
self.write_colored(&transformed_path, |colors| colors.path());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn write_path_with_sep(&mut self, path: &[u8], sep: u8) {
|
||||
let mut path = path.to_vec();
|
||||
for b in &mut path {
|
||||
if *b == b'/' || (cfg!(windows) && *b == b'\\') {
|
||||
*b = sep;
|
||||
fn line_number(&mut self, n: u64, sep: u8) {
|
||||
let mut line_number = n.to_string();
|
||||
if let Some(width) = self.line_number_width {
|
||||
line_number = format!("{:>width$}", line_number, width = width);
|
||||
}
|
||||
self.write_colored(line_number.as_bytes(), |colors| colors.line());
|
||||
self.separator(&[sep]);
|
||||
}
|
||||
self.write(&path);
|
||||
|
||||
fn column_number(&mut self, n: u64, sep: u8) {
|
||||
self.write_colored(n.to_string().as_bytes(), |colors| colors.column());
|
||||
self.separator(&[sep]);
|
||||
}
|
||||
|
||||
fn write_byte_offset(&mut self, o: u64, sep: u8) {
|
||||
self.write_colored(o.to_string().as_bytes(), |colors| colors.column());
|
||||
self.separator(&[sep]);
|
||||
}
|
||||
|
||||
fn write(&mut self, buf: &[u8]) {
|
||||
@@ -447,6 +511,14 @@ impl<W: WriteColor> Printer<W> {
|
||||
self.write(&[eol]);
|
||||
}
|
||||
|
||||
fn write_colored<F>(&mut self, buf: &[u8], get_color: F)
|
||||
where F: Fn(&ColorSpecs) -> &ColorSpec
|
||||
{
|
||||
let _ = self.wtr.set_color(get_color(&self.colors));
|
||||
self.write(buf);
|
||||
let _ = self.wtr.reset();
|
||||
}
|
||||
|
||||
fn write_file_sep(&mut self) {
|
||||
if let Some(ref sep) = self.file_separator {
|
||||
self.has_printed = true;
|
||||
@@ -492,7 +564,7 @@ impl fmt::Display for Error {
|
||||
match *self {
|
||||
Error::UnrecognizedOutType(ref name) => {
|
||||
write!(f, "Unrecognized output type '{}'. Choose from: \
|
||||
path, line, match.", name)
|
||||
path, line, column, match.", name)
|
||||
}
|
||||
Error::UnrecognizedSpecType(ref name) => {
|
||||
write!(f, "Unrecognized spec type '{}'. Choose from: \
|
||||
@@ -503,11 +575,14 @@ impl fmt::Display for Error {
|
||||
}
|
||||
Error::UnrecognizedStyle(ref name) => {
|
||||
write!(f, "Unrecognized style attribute '{}'. Choose from: \
|
||||
nobold, bold.", name)
|
||||
nobold, bold, nointense, intense, nounderline, \
|
||||
underline.", name)
|
||||
}
|
||||
Error::InvalidFormat(ref original) => {
|
||||
write!(f, "Invalid color speci format: '{}'. Valid format \
|
||||
is '(path|line|match):(fg|bg|style):(value)'.",
|
||||
write!(
|
||||
f,
|
||||
"Invalid color spec format: '{}'. Valid format \
|
||||
is '(path|line|column|match):(fg|bg|style):(value)'.",
|
||||
original)
|
||||
}
|
||||
}
|
||||
@@ -525,6 +600,7 @@ impl From<ParseColorError> for Error {
|
||||
pub struct ColorSpecs {
|
||||
path: ColorSpec,
|
||||
line: ColorSpec,
|
||||
column: ColorSpec,
|
||||
matched: ColorSpec,
|
||||
}
|
||||
|
||||
@@ -554,7 +630,7 @@ pub struct ColorSpecs {
|
||||
/// The format of a `Spec` is a triple: `{type}:{attribute}:{value}`. Each
|
||||
/// component is defined as follows:
|
||||
///
|
||||
/// * `{type}` can be one of `path`, `line` or `match`.
|
||||
/// * `{type}` can be one of `path`, `line`, `column` or `match`.
|
||||
/// * `{attribute}` can be one of `fg`, `bg` or `style`. `{attribute}` may also
|
||||
/// be the special value `none`, in which case, `{value}` can be omitted.
|
||||
/// * `{value}` is either a color name (for `fg`/`bg`) or a style instruction.
|
||||
@@ -572,7 +648,8 @@ pub struct ColorSpecs {
|
||||
/// Valid colors are `black`, `blue`, `green`, `red`, `cyan`, `magenta`,
|
||||
/// `yellow`, `white`.
|
||||
///
|
||||
/// Valid style instructions are `nobold` and `bold`.
|
||||
/// Valid style instructions are `nobold`, `bold`, `intense`, `nointense`,
|
||||
/// `underline`, `nounderline`.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct Spec {
|
||||
ty: OutType,
|
||||
@@ -593,6 +670,7 @@ enum SpecValue {
|
||||
enum OutType {
|
||||
Path,
|
||||
Line,
|
||||
Column,
|
||||
Match,
|
||||
}
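A few concrete specs in the `{type}:{attribute}:{value}` format documented above, parsed the same way the tests further down do (`Color` comes from the `termcolor` import at the top of this module; the flag spellings in the comments are illustrative):

```rust
fn color_spec_examples() {
    // `--colors 'path:fg:magenta'`
    let spec: Spec = "path:fg:magenta".parse().unwrap();
    assert_eq!(spec, Spec { ty: OutType::Path, value: SpecValue::Fg(Color::Magenta) });

    // `--colors 'column:bg:green'` — `column` is the newly recognized type.
    let spec: Spec = "column:bg:green".parse().unwrap();
    assert_eq!(spec, Spec { ty: OutType::Column, value: SpecValue::Bg(Color::Green) });

    // `--colors 'match:style:underline'` — `underline` is a newly added style.
    let spec: Spec = "match:style:underline".parse().unwrap();
    assert_eq!(spec, Spec { ty: OutType::Match, value: SpecValue::Style(Style::Underline) });

    // `--colors 'line:none'` clears any previously configured line color.
    let spec: Spec = "line:none".parse().unwrap();
    assert_eq!(spec, Spec { ty: OutType::Line, value: SpecValue::None });
}
```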
|
||||
|
||||
@@ -612,6 +690,8 @@ enum Style {
|
||||
NoBold,
|
||||
Intense,
|
||||
NoIntense,
|
||||
Underline,
|
||||
NoUnderline
|
||||
}
|
||||
|
||||
impl ColorSpecs {
|
||||
@@ -623,6 +703,7 @@ impl ColorSpecs {
|
||||
match user_spec.ty {
|
||||
OutType::Path => user_spec.merge_into(&mut specs.path),
|
||||
OutType::Line => user_spec.merge_into(&mut specs.line),
|
||||
OutType::Column => user_spec.merge_into(&mut specs.column),
|
||||
OutType::Match => user_spec.merge_into(&mut specs.matched),
|
||||
}
|
||||
}
|
||||
@@ -639,6 +720,11 @@ impl ColorSpecs {
|
||||
&self.line
|
||||
}
|
||||
|
||||
/// Return the color specification for coloring column numbers.
|
||||
fn column(&self) -> &ColorSpec {
|
||||
&self.column
|
||||
}
|
||||
|
||||
/// Return the color specification for coloring matched text.
|
||||
fn matched(&self) -> &ColorSpec {
|
||||
&self.matched
|
||||
@@ -665,6 +751,8 @@ impl SpecValue {
|
||||
Style::NoBold => { cspec.set_bold(false); }
|
||||
Style::Intense => { cspec.set_intense(true); }
|
||||
Style::NoIntense => { cspec.set_intense(false); }
|
||||
Style::Underline => { cspec.set_underline(true); }
|
||||
Style::NoUnderline => { cspec.set_underline(false); }
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -679,28 +767,28 @@ impl FromStr for Spec {
|
||||
if pieces.len() <= 1 || pieces.len() > 3 {
|
||||
return Err(Error::InvalidFormat(s.to_string()));
|
||||
}
|
||||
let otype: OutType = try!(pieces[0].parse());
|
||||
match try!(pieces[1].parse()) {
|
||||
let otype: OutType = pieces[0].parse()?;
|
||||
match pieces[1].parse()? {
|
||||
SpecType::None => Ok(Spec { ty: otype, value: SpecValue::None }),
|
||||
SpecType::Style => {
|
||||
if pieces.len() < 3 {
|
||||
return Err(Error::InvalidFormat(s.to_string()));
|
||||
}
|
||||
let style: Style = try!(pieces[2].parse());
|
||||
let style: Style = pieces[2].parse()?;
|
||||
Ok(Spec { ty: otype, value: SpecValue::Style(style) })
|
||||
}
|
||||
SpecType::Fg => {
|
||||
if pieces.len() < 3 {
|
||||
return Err(Error::InvalidFormat(s.to_string()));
|
||||
}
|
||||
let color: Color = try!(pieces[2].parse());
|
||||
let color: Color = pieces[2].parse()?;
|
||||
Ok(Spec { ty: otype, value: SpecValue::Fg(color) })
|
||||
}
|
||||
SpecType::Bg => {
|
||||
if pieces.len() < 3 {
|
||||
return Err(Error::InvalidFormat(s.to_string()));
|
||||
}
|
||||
let color: Color = try!(pieces[2].parse());
|
||||
let color: Color = pieces[2].parse()?;
|
||||
Ok(Spec { ty: otype, value: SpecValue::Bg(color) })
|
||||
}
|
||||
}
|
||||
@@ -714,6 +802,7 @@ impl FromStr for OutType {
|
||||
match &*s.to_lowercase() {
|
||||
"path" => Ok(OutType::Path),
|
||||
"line" => Ok(OutType::Line),
|
||||
"column" => Ok(OutType::Column),
|
||||
"match" => Ok(OutType::Match),
|
||||
_ => Err(Error::UnrecognizedOutType(s.to_string())),
|
||||
}
|
||||
@@ -743,6 +832,8 @@ impl FromStr for Style {
|
||||
"nobold" => Ok(Style::NoBold),
|
||||
"intense" => Ok(Style::Intense),
|
||||
"nointense" => Ok(Style::NoIntense),
|
||||
"underline" => Ok(Style::Underline),
|
||||
"nounderline" => Ok(Style::NoUnderline),
|
||||
_ => Err(Error::UnrecognizedStyle(s.to_string())),
|
||||
}
|
||||
}
|
||||
@@ -765,6 +856,7 @@ mod tests {
|
||||
assert_eq!(ColorSpecs::new(user_specs), ColorSpecs {
|
||||
path: ColorSpec::default(),
|
||||
line: ColorSpec::default(),
|
||||
column: ColorSpec::default(),
|
||||
matched: expect_matched,
|
||||
});
|
||||
}
|
||||
@@ -795,11 +887,23 @@ mod tests {
|
||||
value: SpecValue::Style(Style::Intense),
|
||||
});
|
||||
|
||||
let spec: Spec = "match:style:underline".parse().unwrap();
|
||||
assert_eq!(spec, Spec {
|
||||
ty: OutType::Match,
|
||||
value: SpecValue::Style(Style::Underline),
|
||||
});
|
||||
|
||||
let spec: Spec = "line:none".parse().unwrap();
|
||||
assert_eq!(spec, Spec {
|
||||
ty: OutType::Line,
|
||||
value: SpecValue::None,
|
||||
});
|
||||
|
||||
let spec: Spec = "column:bg:green".parse().unwrap();
|
||||
assert_eq!(spec, Spec {
|
||||
ty: OutType::Column,
|
||||
value: SpecValue::Bg(Color::Green),
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -3,8 +3,8 @@ The `search_buffer` module is responsible for searching a single file all in a
|
||||
single buffer. Typically, the source of the buffer is a memory map. This can
|
||||
be useful for when memory maps are faster than streaming search.
|
||||
|
||||
Note that this module doesn't quite support everything that `search_stream` does.
|
||||
Notably, showing contexts.
|
||||
Note that this module doesn't quite support everything that `search_stream`
|
||||
does. Notably, showing contexts.
|
||||
*/
|
||||
use std::cmp;
|
||||
use std::path::Path;
|
||||
@@ -21,8 +21,10 @@ pub struct BufferSearcher<'a, W: 'a> {
|
||||
grep: &'a Grep,
|
||||
path: &'a Path,
|
||||
buf: &'a [u8],
|
||||
match_count: u64,
|
||||
match_line_count: u64,
|
||||
match_count: Option<u64>,
|
||||
line_count: Option<u64>,
|
||||
byte_offset: Option<u64>,
|
||||
last_line: usize,
|
||||
}
|
||||
|
||||
@@ -39,12 +41,24 @@ impl<'a, W: WriteColor> BufferSearcher<'a, W> {
|
||||
grep: grep,
|
||||
path: path,
|
||||
buf: buf,
|
||||
match_count: 0,
|
||||
match_line_count: 0,
|
||||
match_count: None,
|
||||
line_count: None,
|
||||
byte_offset: None,
|
||||
last_line: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// If enabled, searching will print a 0-based offset of the
|
||||
/// matching line (or the actual match if -o is specified) before
|
||||
/// printing the line itself.
|
||||
///
|
||||
/// Disabled by default.
|
||||
pub fn byte_offset(mut self, yes: bool) -> Self {
|
||||
self.opts.byte_offset = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print a count instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
@@ -53,6 +67,15 @@ impl<'a, W: WriteColor> BufferSearcher<'a, W> {
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print the count of individual matches
|
||||
/// instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
pub fn count_matches(mut self, yes: bool) -> Self {
|
||||
self.opts.count_matches = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print the path instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
@@ -113,13 +136,17 @@ impl<'a, W: WriteColor> BufferSearcher<'a, W> {
|
||||
|
||||
#[inline(never)]
|
||||
pub fn run(mut self) -> u64 {
|
||||
let binary_upto = cmp::min(10240, self.buf.len());
|
||||
let binary_upto = cmp::min(10_240, self.buf.len());
|
||||
if !self.opts.text && is_binary(&self.buf[..binary_upto], true) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
self.match_count = 0;
|
||||
self.match_line_count = 0;
|
||||
self.line_count = if self.opts.line_number { Some(0) } else { None };
|
||||
// The memory map searcher uses one contiguous block of bytes, so the
|
||||
// offsets given the printer are sufficient to compute the byte offset.
|
||||
self.byte_offset = if self.opts.byte_offset { Some(0) } else { None };
|
||||
self.match_count = if self.opts.count_matches { Some(0) } else { None };
|
||||
let mut last_end = 0;
|
||||
for m in self.grep.iter(self.buf) {
|
||||
if self.opts.invert_match {
|
||||
@@ -128,29 +155,43 @@ impl<'a, W: WriteColor> BufferSearcher<'a, W> {
|
||||
self.print_match(m.start(), m.end());
|
||||
}
|
||||
last_end = m.end();
|
||||
if self.opts.terminate(self.match_count) {
|
||||
if self.opts.terminate(self.match_line_count) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if self.opts.invert_match && !self.opts.terminate(self.match_count) {
|
||||
if self.opts.invert_match && !self.opts.terminate(self.match_line_count) {
|
||||
let upto = self.buf.len();
|
||||
self.print_inverted_matches(last_end, upto);
|
||||
}
|
||||
if self.opts.count && self.match_count > 0 {
|
||||
self.printer.path_count(self.path, self.match_count);
|
||||
if self.opts.count && self.match_line_count > 0 {
|
||||
self.printer.path_count(self.path, self.match_line_count);
|
||||
} else if self.opts.count_matches
|
||||
&& self.match_count.map_or(false, |c| c > 0)
|
||||
{
|
||||
self.printer.path_count(self.path, self.match_count.unwrap());
|
||||
}
|
||||
if self.opts.files_with_matches && self.match_count > 0 {
|
||||
if self.opts.files_with_matches && self.match_line_count > 0 {
|
||||
self.printer.path(self.path);
|
||||
}
|
||||
if self.opts.files_without_matches && self.match_count == 0 {
|
||||
if self.opts.files_without_matches && self.match_line_count == 0 {
|
||||
self.printer.path(self.path);
|
||||
}
|
||||
self.match_count
|
||||
self.match_line_count
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn count_individual_matches(&mut self, start: usize, end: usize) {
|
||||
if let Some(ref mut count) = self.match_count {
|
||||
for _ in self.grep.regex().find_iter(&self.buf[start..end]) {
|
||||
*count += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn print_match(&mut self, start: usize, end: usize) {
|
||||
self.match_count += 1;
|
||||
self.match_line_count += 1;
|
||||
self.count_individual_matches(start, end);
|
||||
if self.opts.skip_matches() {
|
||||
return;
|
||||
}
|
||||
@@ -158,7 +199,7 @@ impl<'a, W: WriteColor> BufferSearcher<'a, W> {
|
||||
self.add_line(end);
|
||||
self.printer.matched(
|
||||
self.grep.regex(), self.path, self.buf,
|
||||
start, end, self.line_count);
|
||||
start, end, self.line_count, self.byte_offset);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
@@ -166,7 +207,7 @@ impl<'a, W: WriteColor> BufferSearcher<'a, W> {
|
||||
debug_assert!(self.opts.invert_match);
|
||||
let mut it = IterLines::new(self.opts.eol, start);
|
||||
while let Some((s, e)) = it.next(&self.buf[..end]) {
|
||||
if self.opts.terminate(self.match_count) {
|
||||
if self.opts.terminate(self.match_line_count) {
|
||||
return;
|
||||
}
|
||||
self.print_match(s, e);
|
||||
@@ -271,6 +312,29 @@ and exhibited clearly, with a label attached.\
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn byte_offset() {
|
||||
let (_, out) = search(
|
||||
"Sherlock", SHERLOCK, |s| s.byte_offset(true));
|
||||
assert_eq!(out, "\
|
||||
/baz.rs:0:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
/baz.rs:129:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn byte_offset_inverted() {
|
||||
let (_, out) = search("Sherlock", SHERLOCK, |s| {
|
||||
s.invert_match(true).byte_offset(true)
|
||||
});
|
||||
assert_eq!(out, "\
|
||||
/baz.rs:65:Holmeses, success in the province of detective work must always
|
||||
/baz.rs:193:can extract a clew from a wisp of straw or a flake of cigar ash;
|
||||
/baz.rs:258:but Doctor Watson has to have it taken out for him and dusted,
|
||||
/baz.rs:321:and exhibited clearly, with a label attached.
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn count() {
|
||||
let (count, out) = search(
|
||||
@@ -279,6 +343,13 @@ and exhibited clearly, with a label attached.\
|
||||
assert_eq!(out, "/baz.rs:2\n");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn count_matches() {
|
||||
let (_, out) = search(
|
||||
"the", SHERLOCK, |s| s.count_matches(true));
|
||||
assert_eq!(out, "/baz.rs:4\n");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn files_with_matches() {
|
||||
let (count, out) = search(
|
||||
|
||||
@@ -67,8 +67,10 @@ pub struct Searcher<'a, R, W: 'a> {
|
||||
grep: &'a Grep,
|
||||
path: &'a Path,
|
||||
haystack: R,
|
||||
match_count: u64,
|
||||
match_line_count: u64,
|
||||
match_count: Option<u64>,
|
||||
line_count: Option<u64>,
|
||||
byte_offset: Option<u64>,
|
||||
last_match: Match,
|
||||
last_printed: usize,
|
||||
last_line: usize,
|
||||
@@ -80,7 +82,9 @@ pub struct Searcher<'a, R, W: 'a> {
|
||||
pub struct Options {
|
||||
pub after_context: usize,
|
||||
pub before_context: usize,
|
||||
pub byte_offset: bool,
|
||||
pub count: bool,
|
||||
pub count_matches: bool,
|
||||
pub files_with_matches: bool,
|
||||
pub files_without_matches: bool,
|
||||
pub eol: u8,
|
||||
@@ -96,7 +100,9 @@ impl Default for Options {
|
||||
Options {
|
||||
after_context: 0,
|
||||
before_context: 0,
|
||||
byte_offset: false,
|
||||
count: false,
|
||||
count_matches: false,
|
||||
files_with_matches: false,
|
||||
files_without_matches: false,
|
||||
eol: b'\n',
|
||||
@@ -111,11 +117,11 @@ impl Default for Options {
|
||||
}
|
||||
|
||||
impl Options {
|
||||
/// Several options (--quiet, --count, --files-with-matches,
|
||||
/// Several options (--quiet, --count, --count-matches, --files-with-matches,
|
||||
/// --files-without-match) imply that we shouldn't ever display matches.
|
||||
pub fn skip_matches(&self) -> bool {
|
||||
self.count || self.files_with_matches || self.files_without_matches
|
||||
|| self.quiet
|
||||
|| self.quiet || self.count_matches
|
||||
}
|
||||
|
||||
/// Some options (--quiet, --files-with-matches, --files-without-match)
|
||||
@@ -124,12 +130,12 @@ impl Options {
|
||||
self.files_with_matches || self.files_without_matches || self.quiet
|
||||
}
|
||||
|
||||
/// Returns true if the search should terminate based on the match count.
|
||||
pub fn terminate(&self, match_count: u64) -> bool {
|
||||
if match_count > 0 && self.stop_after_first_match() {
|
||||
/// Returns true if the search should terminate based on the match line count.
|
||||
pub fn terminate(&self, match_line_count: u64) -> bool {
|
||||
if match_line_count > 0 && self.stop_after_first_match() {
|
||||
return true;
|
||||
}
|
||||
if self.max_count.map_or(false, |max| match_count >= max) {
|
||||
if self.max_count.map_or(false, |max| match_line_count >= max) {
|
||||
return true;
|
||||
}
|
||||
false
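An illustration of the termination rule above, assuming `Options::default()` plus its public `quiet` and `max_count` fields as defined in this module:

```rust
fn terminate_examples() {
    let mut opts = Options::default();
    opts.max_count = Some(2);
    assert!(!opts.terminate(1)); // below --max-count, keep searching
    assert!(opts.terminate(2));  // reached the limit, stop

    opts.quiet = true;
    assert!(opts.terminate(1));  // any matching line is enough once --quiet
                                 // (or -l / --files-without-match) is set
}
```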
|
||||
@@ -163,8 +169,10 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
grep: grep,
|
||||
path: path,
|
||||
haystack: haystack,
|
||||
match_count: 0,
|
||||
match_line_count: 0,
|
||||
match_count: None,
|
||||
line_count: None,
|
||||
byte_offset: None,
|
||||
last_match: Match::default(),
|
||||
last_printed: 0,
|
||||
last_line: 0,
|
||||
@@ -186,6 +194,16 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print a 0-based offset of the
|
||||
/// matching line (or the actual match if -o is specified) before
|
||||
/// printing the line itself.
|
||||
///
|
||||
/// Disabled by default.
|
||||
pub fn byte_offset(mut self, yes: bool) -> Self {
|
||||
self.opts.byte_offset = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print a count instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
@@ -194,6 +212,15 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print the count of individual matches
|
||||
/// instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
pub fn count_matches(mut self, yes: bool) -> Self {
|
||||
self.opts.count_matches = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print the path instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
@@ -257,14 +284,16 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
#[inline(never)]
|
||||
pub fn run(mut self) -> Result<u64, Error> {
|
||||
self.inp.reset();
|
||||
self.match_count = 0;
|
||||
self.match_line_count = 0;
|
||||
self.line_count = if self.opts.line_number { Some(0) } else { None };
|
||||
self.byte_offset = if self.opts.byte_offset { Some(0) } else { None };
|
||||
self.match_count = if self.opts.count_matches { Some(0) } else { None };
|
||||
self.last_match = Match::default();
|
||||
self.after_context_remaining = 0;
|
||||
while !self.terminate() {
|
||||
let upto = self.inp.lastnl;
|
||||
self.print_after_context(upto);
|
||||
if !try!(self.fill()) {
|
||||
if !self.fill()? {
|
||||
break;
|
||||
}
|
||||
while !self.terminate() && self.inp.pos < self.inp.lastnl {
|
||||
@@ -299,35 +328,48 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
}
|
||||
}
|
||||
}
|
||||
if self.match_count > 0 {
|
||||
if self.after_context_remaining > 0 {
|
||||
if self.last_printed == self.inp.lastnl {
|
||||
self.fill()?;
|
||||
}
|
||||
let upto = self.inp.lastnl;
|
||||
if upto > 0 {
|
||||
self.print_after_context(upto);
|
||||
}
|
||||
}
|
||||
if self.match_line_count > 0 {
|
||||
if self.opts.count {
|
||||
self.printer.path_count(self.path, self.match_count);
|
||||
self.printer.path_count(self.path, self.match_line_count);
|
||||
} else if self.opts.count_matches {
|
||||
self.printer.path_count(self.path, self.match_count.unwrap());
|
||||
} else if self.opts.files_with_matches {
|
||||
self.printer.path(self.path);
|
||||
}
|
||||
} else if self.opts.files_without_matches {
|
||||
self.printer.path(self.path);
|
||||
}
|
||||
Ok(self.match_count)
|
||||
Ok(self.match_line_count)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn terminate(&self) -> bool {
|
||||
self.opts.terminate(self.match_count)
|
||||
self.opts.terminate(self.match_line_count)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn fill(&mut self) -> Result<bool, Error> {
|
||||
let mut keep = self.inp.lastnl;
|
||||
let keep =
|
||||
if self.opts.before_context > 0 || self.opts.after_context > 0 {
|
||||
let lines = 1 + cmp::max(
|
||||
self.opts.before_context, self.opts.after_context);
|
||||
keep = start_of_previous_lines(
|
||||
start_of_previous_lines(
|
||||
self.opts.eol,
|
||||
&self.inp.buf,
|
||||
self.inp.lastnl.saturating_sub(1),
|
||||
lines);
|
||||
}
|
||||
lines)
|
||||
} else {
|
||||
self.inp.lastnl
|
||||
};
|
||||
if keep < self.last_printed {
|
||||
self.last_printed -= keep;
|
||||
} else {
|
||||
@@ -339,9 +381,10 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
self.count_lines(keep);
|
||||
self.last_line = 0;
|
||||
}
|
||||
let ok = try!(self.inp.fill(&mut self.haystack, keep).map_err(|err| {
|
||||
self.count_byte_offset(keep);
|
||||
let ok = self.inp.fill(&mut self.haystack, keep).map_err(|err| {
|
||||
Error::from_io(err, &self.path)
|
||||
}));
|
||||
})?;
|
||||
Ok(ok)
|
||||
}
|
||||
|
||||
@@ -400,7 +443,8 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
|
||||
#[inline(always)]
|
||||
fn print_match(&mut self, start: usize, end: usize) {
|
||||
self.match_count += 1;
|
||||
self.match_line_count += 1;
|
||||
self.count_individual_matches(start, end);
|
||||
if self.opts.skip_matches() {
|
||||
return;
|
||||
}
|
||||
@@ -409,7 +453,7 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
self.add_line(end);
|
||||
self.printer.matched(
|
||||
self.grep.regex(), self.path,
|
||||
&self.inp.buf, start, end, self.line_count);
|
||||
&self.inp.buf, start, end, self.line_count, self.byte_offset);
|
||||
self.last_printed = end;
|
||||
self.after_context_remaining = self.opts.after_context;
|
||||
}
|
||||
@@ -419,7 +463,8 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
self.count_lines(start);
|
||||
self.add_line(end);
|
||||
self.printer.context(
|
||||
&self.path, &self.inp.buf, start, end, self.line_count);
|
||||
&self.path, &self.inp.buf, start, end,
|
||||
self.line_count, self.byte_offset);
|
||||
self.last_printed = end;
|
||||
}
|
||||
|
||||
@@ -437,6 +482,22 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn count_byte_offset(&mut self, buf_last_end: usize) {
|
||||
if let Some(ref mut byte_offset) = self.byte_offset {
|
||||
*byte_offset += buf_last_end as u64;
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn count_individual_matches(&mut self, start: usize, end: usize) {
|
||||
if let Some(ref mut count) = self.match_count {
|
||||
for _ in self.grep.regex().find_iter(&self.inp.buf[start..end]) {
|
||||
*count += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn count_lines(&mut self, upto: usize) {
|
||||
if let Some(ref mut line_count) = self.line_count {
|
||||
@@ -585,8 +646,8 @@ impl InputBuffer {
|
||||
let new_len = cmp::max(min_len, self.buf.len() * 2);
|
||||
self.buf.resize(new_len, 0);
|
||||
}
|
||||
let n = try!(rdr.read(
|
||||
&mut self.buf[self.end..self.end + self.read_size]));
|
||||
let n = rdr.read(
|
||||
&mut self.buf[self.end..self.end + self.read_size])?;
|
||||
if !self.text {
|
||||
if is_binary(&self.buf[self.end..self.end + n], self.first) {
|
||||
return Ok(false);
|
||||
@@ -996,6 +1057,48 @@ fn main() {
|
||||
assert_eq!(out, "/baz.rs:2\n");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn byte_offset() {
|
||||
let (_, out) = search_smallcap(
|
||||
"Sherlock", SHERLOCK, |s| s.byte_offset(true));
|
||||
assert_eq!(out, "\
|
||||
/baz.rs:0:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
/baz.rs:129:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn byte_offset_with_before_context() {
|
||||
let (_, out) = search_smallcap("dusted", SHERLOCK, |s| {
|
||||
s.line_number(true).byte_offset(true).before_context(2)
|
||||
});
|
||||
assert_eq!(out, "\
|
||||
/baz.rs-3-129-be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
/baz.rs-4-193-can extract a clew from a wisp of straw or a flake of cigar ash;
|
||||
/baz.rs:5:258:but Doctor Watson has to have it taken out for him and dusted,
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn byte_offset_inverted() {
|
||||
let (_, out) = search_smallcap("Sherlock", SHERLOCK, |s| {
|
||||
s.invert_match(true).byte_offset(true)
|
||||
});
|
||||
assert_eq!(out, "\
|
||||
/baz.rs:65:Holmeses, success in the province of detective work must always
|
||||
/baz.rs:193:can extract a clew from a wisp of straw or a flake of cigar ash;
|
||||
/baz.rs:258:but Doctor Watson has to have it taken out for him and dusted,
|
||||
/baz.rs:321:and exhibited clearly, with a label attached.
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn count_matches() {
|
||||
let (_, out) = search_smallcap(
|
||||
"the", SHERLOCK, |s| s.count_matches(true));
|
||||
assert_eq!(out, "/baz.rs:4\n");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn files_with_matches() {
|
||||
let (count, out) = search_smallcap(
|
||||
@@ -1247,6 +1350,23 @@ fn main() {
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn after_context_invert_one_max_count_two() {
|
||||
let (count, out) = search_smallcap("Sherlock", SHERLOCK, |s| {
|
||||
s.line_number(true)
|
||||
.invert_match(true)
|
||||
.after_context(1)
|
||||
.max_count(Some(2))
|
||||
});
|
||||
assert_eq!(2, count);
|
||||
assert_eq!(out, "\
|
||||
/baz.rs:2:Holmeses, success in the province of detective work must always
|
||||
/baz.rs-3-be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
/baz.rs:4:can extract a clew from a wisp of straw or a flake of cigar ash;
|
||||
/baz.rs-5-but Doctor Watson has to have it taken out for him and dusted,
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn after_context_two1() {
|
||||
let (count, out) = search_smallcap("Sherlock", SHERLOCK, |s| {
|
||||
@@ -1290,6 +1410,23 @@ fn main() {
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn after_context_two_max_count_two() {
|
||||
let (count, out) = search_smallcap(
|
||||
"Doctor", SHERLOCK, |s| {
|
||||
s.line_number(true).after_context(2).max_count(Some(2))
|
||||
});
|
||||
assert_eq!(2, count);
|
||||
assert_eq!(out, "\
|
||||
/baz.rs:1:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
/baz.rs-2-Holmeses, success in the province of detective work must always
|
||||
/baz.rs-3-be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
--
|
||||
/baz.rs:5:but Doctor Watson has to have it taken out for him and dusted,
|
||||
/baz.rs-6-and exhibited clearly, with a label attached.
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn after_context_three1() {
|
||||
let (count, out) = search_smallcap("Sherlock", SHERLOCK, |s| {
|
||||
|
||||
@@ -14,8 +14,8 @@ enum State {
|
||||
/// Unescapes a string given on the command line. It supports a limited set of
|
||||
/// escape sequences:
|
||||
///
|
||||
/// * \t, \r and \n are mapped to their corresponding ASCII bytes.
|
||||
/// * \xZZ hexadecimal escapes are mapped to their byte.
|
||||
/// * `\t`, `\r` and `\n` are mapped to their corresponding ASCII bytes.
|
||||
/// * `\xZZ` hexadecimal escapes are mapped to their byte.
|
||||
pub fn unescape(s: &str) -> Vec<u8> {
|
||||
use self::State::*;
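A quick illustration of the escape handling documented above, assuming the behavior is exactly as described (the input is made up):

```rust
fn unescape_examples() {
    // `\t` and `\n` become single ASCII bytes; `\x7f` becomes the byte 0x7f.
    assert_eq!(unescape(r"\tfoo\x7fbar\n"), b"\tfoo\x7fbar\n".to_vec());
}
```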
|
||||
|
||||
|
||||
@@ -5,10 +5,11 @@ use std::path::Path;
|
||||
use encoding_rs::Encoding;
|
||||
use grep::Grep;
|
||||
use ignore::DirEntry;
|
||||
use memmap::{Mmap, Protection};
|
||||
use memmap::Mmap;
|
||||
use termcolor::WriteColor;
|
||||
|
||||
use decoder::DecodeReader;
|
||||
use decompressor::{self, DecompressionReader};
|
||||
use pathutil::strip_prefix;
|
||||
use printer::Printer;
|
||||
use search_buffer::BufferSearcher;
|
||||
@@ -32,7 +33,9 @@ struct Options {
|
||||
encoding: Option<&'static Encoding>,
|
||||
after_context: usize,
|
||||
before_context: usize,
|
||||
byte_offset: bool,
|
||||
count: bool,
|
||||
count_matches: bool,
|
||||
files_with_matches: bool,
|
||||
files_without_matches: bool,
|
||||
eol: u8,
|
||||
@@ -42,6 +45,7 @@ struct Options {
|
||||
no_messages: bool,
|
||||
quiet: bool,
|
||||
text: bool,
|
||||
search_zip_files: bool
|
||||
}
|
||||
|
||||
impl Default for Options {
|
||||
@@ -51,7 +55,9 @@ impl Default for Options {
|
||||
encoding: None,
|
||||
after_context: 0,
|
||||
before_context: 0,
|
||||
byte_offset: false,
|
||||
count: false,
|
||||
count_matches: false,
|
||||
files_with_matches: false,
|
||||
files_without_matches: false,
|
||||
eol: b'\n',
|
||||
@@ -61,6 +67,7 @@ impl Default for Options {
|
||||
no_messages: false,
|
||||
quiet: false,
|
||||
text: false,
|
||||
search_zip_files: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -103,6 +110,16 @@ impl WorkerBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print a 0-based offset of the
|
||||
/// matching line (or the actual match if -o is specified) before
|
||||
/// printing the line itself.
|
||||
///
|
||||
/// Disabled by default.
|
||||
pub fn byte_offset(mut self, yes: bool) -> Self {
|
||||
self.opts.byte_offset = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print a count instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
@@ -111,6 +128,15 @@ impl WorkerBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print the count of individual matches
|
||||
/// instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
pub fn count_matches(mut self, yes: bool) -> Self {
|
||||
self.opts.count_matches = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the encoding to use to read each file.
|
||||
///
|
||||
/// If the encoding is `None` (the default), then the encoding is
|
||||
@@ -190,6 +216,12 @@ impl WorkerBuilder {
|
||||
self.opts.text = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, search through compressed files as well
|
||||
pub fn search_zip_files(mut self, yes: bool) -> Self {
|
||||
self.opts.search_zip_files = yes;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Worker is responsible for executing searches on file paths, while choosing
|
||||
@@ -218,6 +250,16 @@ impl Worker {
|
||||
}
|
||||
Work::DirEntry(dent) => {
|
||||
let mut path = dent.path();
|
||||
if self.opts.search_zip_files
|
||||
&& decompressor::is_compressed(path)
|
||||
{
|
||||
match DecompressionReader::from_path(path) {
|
||||
Some(reader) => self.search(printer, path, reader),
|
||||
None => {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let file = match File::open(path) {
|
||||
Ok(file) => file,
|
||||
Err(err) => {
|
||||
@@ -236,6 +278,7 @@ impl Worker {
|
||||
self.search(printer, path, file)
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
match result {
|
||||
Ok(count) => {
|
||||
@@ -263,7 +306,9 @@ impl Worker {
|
||||
searcher
|
||||
.after_context(self.opts.after_context)
|
||||
.before_context(self.opts.before_context)
|
||||
.byte_offset(self.opts.byte_offset)
|
||||
.count(self.opts.count)
|
||||
.count_matches(self.opts.count_matches)
|
||||
.files_with_matches(self.opts.files_with_matches)
|
||||
.files_without_matches(self.opts.files_without_matches)
|
||||
.eol(self.opts.eol)
|
||||
@@ -282,7 +327,7 @@ impl Worker {
|
||||
path: &Path,
|
||||
file: &File,
|
||||
) -> Result<u64> {
|
||||
if try!(file.metadata()).len() == 0 {
|
||||
if file.metadata()?.len() == 0 {
|
||||
// Opening a memory map with an empty file results in an error.
|
||||
// However, this may not actually be an empty file! For example,
|
||||
// /proc/cpuinfo reports itself as an empty file, but it can
|
||||
@@ -290,8 +335,11 @@ impl Worker {
|
||||
// regular read calls.
|
||||
return self.search(printer, path, file);
|
||||
}
|
||||
let mmap = try!(Mmap::open(file, Protection::Read));
|
||||
let buf = unsafe { mmap.as_slice() };
|
||||
let mmap = match self.mmap(file)? {
|
||||
None => return self.search(printer, path, file),
|
||||
Some(mmap) => mmap,
|
||||
};
|
||||
let buf = &*mmap;
|
||||
if buf.len() >= 3 && Encoding::for_bom(buf).is_some() {
|
||||
// If we have a UTF-16 bom in our memory map, then we need to fall
|
||||
// back to the stream reader, which will do transcoding.
|
||||
@@ -299,7 +347,9 @@ impl Worker {
|
||||
}
|
||||
let searcher = BufferSearcher::new(printer, &self.grep, path, buf);
|
||||
Ok(searcher
|
||||
.byte_offset(self.opts.byte_offset)
|
||||
.count(self.opts.count)
|
||||
.count_matches(self.opts.count_matches)
|
||||
.files_with_matches(self.opts.files_with_matches)
|
||||
.files_without_matches(self.opts.files_without_matches)
|
||||
.eol(self.opts.eol)
|
||||
@@ -310,4 +360,31 @@ impl Worker {
|
||||
.text(self.opts.text)
|
||||
.run())
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn mmap(&self, file: &File) -> Result<Option<Mmap>> {
|
||||
Ok(Some(mmap_readonly(file)?))
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn mmap(&self, file: &File) -> Result<Option<Mmap>> {
|
||||
use libc::{EOVERFLOW, ENODEV, ENOMEM};
|
||||
|
||||
let err = match mmap_readonly(file) {
|
||||
Ok(mmap) => return Ok(Some(mmap)),
|
||||
Err(err) => err,
|
||||
};
|
||||
let code = err.raw_os_error();
|
||||
if code == Some(EOVERFLOW)
|
||||
|| code == Some(ENODEV)
|
||||
|| code == Some(ENOMEM)
|
||||
{
|
||||
return Ok(None);
|
||||
}
|
||||
Err(From::from(err))
|
||||
}
|
||||
}
|
||||
|
||||
fn mmap_readonly(file: &File) -> io::Result<Mmap> {
|
||||
unsafe { Mmap::map(file) }
|
||||
}
|
||||
|
||||
3
termcolor/COPYING
Normal file
@@ -0,0 +1,3 @@
|
||||
This project is dual-licensed under the Unlicense and MIT licenses.
|
||||
|
||||
You may use this code under the terms of either license.
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "termcolor"
|
||||
version = "0.3.1" #:version
|
||||
version = "0.3.6" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
A simple cross platform library for writing colored text to a terminal.
|
||||
@@ -17,4 +17,4 @@ name = "termcolor"
|
||||
bench = false
|
||||
|
||||
[target.'cfg(windows)'.dependencies]
|
||||
wincolor = { version = "0.1.3", path = "../wincolor" }
|
||||
wincolor = { version = "0.1.6", path = "../wincolor" }
|
||||
|
||||
21
termcolor/LICENSE-MIT
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Andrew Gallant
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
@@ -62,15 +62,15 @@ use std::io::Write;
|
||||
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
|
||||
|
||||
let mut stdout = StandardStream::stdout(ColorChoice::Always);
|
||||
try!(stdout.set_color(ColorSpec::new().set_fg(Some(Color::Green))));
|
||||
try!(writeln!(&mut stdout, "green text!"));
|
||||
stdout.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
|
||||
writeln!(&mut stdout, "green text!")?;
|
||||
```
|
||||
|
||||
### Example: using `BufferWriter`
|
||||
|
||||
A `BufferWriter` can create buffers and write buffers to stdout or stderr. It
|
||||
does *not* implement `io::Write` or `WriteColor` itself. Instead, `Buffer`
|
||||
implements `io::Write` and `io::WriteColor`.
|
||||
implements `io::Write` and `termcolor::WriteColor`.
|
||||
|
||||
This example shows how to print some green text to stderr.
|
||||
|
||||
@@ -80,7 +80,7 @@ use termcolor::{BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};
|
||||
|
||||
let mut bufwtr = BufferWriter::stderr(ColorChoice::Always);
|
||||
let mut buffer = bufwtr.buffer();
|
||||
try!(buffer.set_color(ColorSpec::new().set_fg(Some(Color::Green))));
|
||||
try!(writeln!(&mut buffer, "green text!"));
|
||||
try!(bufwtr.print(&buffer));
|
||||
buffer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
|
||||
writeln!(&mut buffer, "green text!")?;
|
||||
bufwtr.print(&buffer)?;
|
||||
```
|
||||
|
||||
24
termcolor/UNLICENSE
Normal file
@@ -0,0 +1,24 @@
|
||||
This is free and unencumbered software released into the public domain.
|
||||
|
||||
Anyone is free to copy, modify, publish, use, compile, sell, or
|
||||
distribute this software, either in source code form or as a compiled
|
||||
binary, for any purpose, commercial or non-commercial, and by any
|
||||
means.
|
||||
|
||||
In jurisdictions that recognize copyright laws, the author or authors
|
||||
of this software dedicate any and all copyright interest in the
|
||||
software to the public domain. We make this dedication for the benefit
|
||||
of the public at large and to the detriment of our heirs and
|
||||
successors. We intend this dedication to be an overt act of
|
||||
relinquishment in perpetuity of all present and future rights to this
|
||||
software under copyright law.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
For more information, please refer to <http://unlicense.org/>
|
||||
@@ -42,8 +42,8 @@ use std::io::Write;
|
||||
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
|
||||
|
||||
let mut stdout = StandardStream::stdout(ColorChoice::Always);
|
||||
try!(stdout.set_color(ColorSpec::new().set_fg(Some(Color::Green))));
|
||||
try!(writeln!(&mut stdout, "green text!"));
|
||||
stdout.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
|
||||
writeln!(&mut stdout, "green text!")?;
|
||||
# Ok(()) }
|
||||
```
|
||||
|
||||
@@ -62,9 +62,9 @@ use termcolor::{BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};
|
||||
|
||||
let mut bufwtr = BufferWriter::stderr(ColorChoice::Always);
|
||||
let mut buffer = bufwtr.buffer();
|
||||
try!(buffer.set_color(ColorSpec::new().set_fg(Some(Color::Green))));
|
||||
try!(writeln!(&mut buffer, "green text!"));
|
||||
try!(bufwtr.print(&buffer));
|
||||
buffer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
|
||||
writeln!(&mut buffer, "green text!")?;
|
||||
bufwtr.print(&buffer)?;
|
||||
# Ok(()) }
|
||||
```
|
||||
*/
|
||||
@@ -104,7 +104,7 @@ pub trait WriteColor: io::Write {
|
||||
fn reset(&mut self) -> io::Result<()>;
|
||||
}
|
||||
|
||||
impl<'a, T: WriteColor> WriteColor for &'a mut T {
|
||||
impl<'a, T: ?Sized + WriteColor> WriteColor for &'a mut T {
|
||||
fn supports_color(&self) -> bool { (&**self).supports_color() }
|
||||
fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
|
||||
(&mut **self).set_color(spec)
|
||||
@@ -202,15 +202,23 @@ enum IoStandardStream {
|
||||
impl IoStandardStream {
|
||||
fn new(sty: StandardStreamType) -> IoStandardStream {
|
||||
match sty {
|
||||
StandardStreamType::Stdout => IoStandardStream::Stdout(io::stdout()),
|
||||
StandardStreamType::Stderr => IoStandardStream::Stderr(io::stderr()),
|
||||
StandardStreamType::Stdout => {
|
||||
IoStandardStream::Stdout(io::stdout())
|
||||
}
|
||||
StandardStreamType::Stderr => {
|
||||
IoStandardStream::Stderr(io::stderr())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn lock(&self) -> IoStandardStreamLock {
|
||||
match *self {
|
||||
IoStandardStream::Stdout(ref s) => IoStandardStreamLock::StdoutLock(s.lock()),
|
||||
IoStandardStream::Stderr(ref s) => IoStandardStreamLock::StderrLock(s.lock()),
|
||||
IoStandardStream::Stdout(ref s) => {
|
||||
IoStandardStreamLock::StdoutLock(s.lock())
|
||||
}
|
||||
IoStandardStream::Stderr(ref s) => {
|
||||
IoStandardStreamLock::StderrLock(s.lock())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -231,7 +239,7 @@ impl io::Write for IoStandardStream {
|
||||
}
|
||||
}
|
||||
|
||||
/// Same rigamorale for the locked variants of the standard streams.
|
||||
// Same rigmarole for the locked variants of the standard streams.
|
||||
|
||||
enum IoStandardStreamLock<'a> {
|
||||
StdoutLock(io::StdoutLock<'a>),
|
||||
@@ -257,7 +265,7 @@ impl<'a> io::Write for IoStandardStreamLock<'a> {
|
||||
/// Satisfies `io::Write` and `WriteColor`, and supports optional coloring
|
||||
/// to either of the standard output streams, stdout and stderr.
|
||||
pub struct StandardStream {
|
||||
wtr: LossyStandardStream<WriterInner<'static, IoStandardStream>>,
|
||||
wtr: LossyStandardStream<WriterInner<IoStandardStream>>,
|
||||
}
|
||||
|
||||
/// `StandardStreamLock` is a locked reference to a `StandardStream`.
|
||||
@@ -265,26 +273,34 @@ pub struct StandardStream {
|
||||
/// This implements the `io::Write` and `WriteColor` traits, and is constructed
|
||||
/// via the `Write::lock` method.
|
||||
///
|
||||
/// The lifetime `'a` refers to the lifetime of the corresponding `StandardStream`.
|
||||
/// The lifetime `'a` refers to the lifetime of the corresponding
|
||||
/// `StandardStream`.
|
||||
pub struct StandardStreamLock<'a> {
|
||||
wtr: LossyStandardStream<WriterInner<'a, IoStandardStreamLock<'a>>>,
|
||||
wtr: LossyStandardStream<WriterInnerLock<'a, IoStandardStreamLock<'a>>>,
|
||||
}
|
||||
|
||||
/// WriterInner is a (limited) generic representation of a writer. It is
|
||||
/// limited because W should only ever be stdout/stderr on Windows.
|
||||
enum WriterInner<'a, W> {
|
||||
enum WriterInner<W> {
|
||||
NoColor(NoColor<W>),
|
||||
Ansi(Ansi<W>),
|
||||
#[cfg(windows)]
|
||||
Windows { wtr: W, console: Mutex<wincolor::Console> },
|
||||
}
|
||||
|
||||
/// WriterInnerLock is a (limited) generic representation of a writer. It is
|
||||
/// limited because W should only ever be stdout/stderr on Windows.
|
||||
enum WriterInnerLock<'a, W> {
|
||||
NoColor(NoColor<W>),
|
||||
Ansi(Ansi<W>),
|
||||
/// What a gross hack. On Windows, we need to specify a lifetime for the
|
||||
/// console when in a locked state, but obviously don't need to do that
|
||||
/// on Unix, which make the `'a` unused. To satisfy the compiler, we need
|
||||
/// on Unix, which makes the `'a` unused. To satisfy the compiler, we need
|
||||
/// a PhantomData.
|
||||
#[allow(dead_code)]
|
||||
Unreachable(::std::marker::PhantomData<&'a ()>),
|
||||
#[cfg(windows)]
|
||||
Windows { wtr: W, console: Mutex<wincolor::Console> },
|
||||
#[cfg(windows)]
|
||||
WindowsLocked { wtr: W, console: MutexGuard<'a, wincolor::Console> },
|
||||
Windows { wtr: W, console: MutexGuard<'a, wincolor::Console> },
|
||||
}
|
||||
|
||||
impl StandardStream {
|
||||
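Not part of the patch: a small usage sketch of the locked stream type discussed above, assuming `StandardStream` exposes the `lock` method referenced in the doc comment and that `StandardStreamLock` implements `io::Write` as described.

```rust
use std::io::Write;
use termcolor::{ColorChoice, StandardStream};

fn main() -> std::io::Result<()> {
    let stdout = StandardStream::stdout(ColorChoice::Auto);
    // Hold the lock for the whole loop so other writers cannot interleave output.
    let mut handle = stdout.lock();
    for i in 0..3 {
        writeln!(handle, "line {}", i)?;
    }
    Ok(())
}
```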
@@ -312,14 +328,17 @@ impl StandardStream {
|
||||
/// the `WriteColor` trait.
|
||||
#[cfg(windows)]
|
||||
fn create(sty: StandardStreamType, choice: ColorChoice) -> StandardStream {
|
||||
let con = match sty {
|
||||
let mut con = match sty {
|
||||
StandardStreamType::Stdout => wincolor::Console::stdout(),
|
||||
StandardStreamType::Stderr => wincolor::Console::stderr(),
|
||||
};
|
||||
let is_win_console = con.is_ok();
|
||||
let is_console_virtual = con.as_mut().map(|con| {
|
||||
con.set_virtual_terminal_processing(true).is_ok()
|
||||
}).unwrap_or(false);
|
||||
let wtr =
|
||||
if choice.should_attempt_color() {
|
||||
if choice.should_ansi() {
|
||||
if choice.should_ansi() || is_console_virtual {
|
||||
WriterInner::Ansi(Ansi(IoStandardStream::new(sty)))
|
||||
} else if let Ok(console) = con {
|
||||
WriterInner::Windows {
|
||||
@@ -332,7 +351,9 @@ impl StandardStream {
|
||||
} else {
|
||||
WriterInner::NoColor(NoColor(IoStandardStream::new(sty)))
|
||||
};
|
||||
StandardStream { wtr: LossyStandardStream::new(wtr).is_console(is_win_console) }
|
||||
StandardStream {
|
||||
wtr: LossyStandardStream::new(wtr).is_console(is_win_console),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new `StandardStream` with the given color preferences that
|
||||
@@ -375,12 +396,11 @@ impl<'a> StandardStreamLock<'a> {
|
||||
#[cfg(not(windows))]
|
||||
fn from_stream(stream: &StandardStream) -> StandardStreamLock {
|
||||
let locked = match *stream.wtr.get_ref() {
|
||||
WriterInner::Unreachable(_) => unreachable!(),
|
||||
WriterInner::NoColor(ref w) => {
|
||||
WriterInner::NoColor(NoColor(w.0.lock()))
|
||||
WriterInnerLock::NoColor(NoColor(w.0.lock()))
|
||||
}
|
||||
WriterInner::Ansi(ref w) => {
|
||||
WriterInner::Ansi(Ansi(w.0.lock()))
|
||||
WriterInnerLock::Ansi(Ansi(w.0.lock()))
|
||||
}
|
||||
};
|
||||
StandardStreamLock { wtr: stream.wtr.wrap(locked) }
|
||||
@@ -389,24 +409,19 @@ impl<'a> StandardStreamLock<'a> {
|
||||
#[cfg(windows)]
|
||||
fn from_stream(stream: &StandardStream) -> StandardStreamLock {
|
||||
let locked = match *stream.wtr.get_ref() {
|
||||
WriterInner::Unreachable(_) => unreachable!(),
|
||||
WriterInner::NoColor(ref w) => {
|
||||
WriterInner::NoColor(NoColor(w.0.lock()))
|
||||
WriterInnerLock::NoColor(NoColor(w.0.lock()))
|
||||
}
|
||||
WriterInner::Ansi(ref w) => {
|
||||
WriterInner::Ansi(Ansi(w.0.lock()))
|
||||
WriterInnerLock::Ansi(Ansi(w.0.lock()))
|
||||
}
|
||||
#[cfg(windows)]
|
||||
WriterInner::Windows { ref wtr, ref console } => {
|
||||
WriterInner::WindowsLocked {
|
||||
WriterInnerLock::Windows {
|
||||
wtr: wtr.lock(),
|
||||
console: console.lock().unwrap(),
|
||||
}
|
||||
}
|
||||
#[cfg(windows)]
|
||||
WriterInner::WindowsLocked{..} => {
|
||||
panic!("cannot call StandardStream.lock while a StandardStreamLock is alive");
|
||||
}
|
||||
};
|
||||
StandardStreamLock { wtr: stream.wtr.wrap(locked) }
|
||||
}
|
||||
@@ -438,59 +453,104 @@ impl<'a> WriteColor for StandardStreamLock<'a> {
|
||||
fn reset(&mut self) -> io::Result<()> { self.wtr.reset() }
|
||||
}
|
||||
|
||||
impl<'a, W: io::Write> io::Write for WriterInner<'a, W> {
|
||||
impl<W: io::Write> io::Write for WriterInner<W> {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
match *self {
|
||||
WriterInner::Unreachable(_) => unreachable!(),
|
||||
WriterInner::NoColor(ref mut wtr) => wtr.write(buf),
|
||||
WriterInner::Ansi(ref mut wtr) => wtr.write(buf),
|
||||
#[cfg(windows)]
|
||||
WriterInner::Windows { ref mut wtr, .. } => wtr.write(buf),
|
||||
#[cfg(windows)]
|
||||
WriterInner::WindowsLocked { ref mut wtr, .. } => wtr.write(buf),
|
||||
}
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
match *self {
|
||||
WriterInner::Unreachable(_) => unreachable!(),
|
||||
WriterInner::NoColor(ref mut wtr) => wtr.flush(),
|
||||
WriterInner::Ansi(ref mut wtr) => wtr.flush(),
|
||||
#[cfg(windows)]
|
||||
WriterInner::Windows { ref mut wtr, .. } => wtr.flush(),
|
||||
#[cfg(windows)]
|
||||
WriterInner::WindowsLocked { ref mut wtr, .. } => wtr.flush(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, W: io::Write> WriteColor for WriterInner<'a, W> {
|
||||
impl<W: io::Write> WriteColor for WriterInner<W> {
|
||||
fn supports_color(&self) -> bool {
|
||||
match *self {
|
||||
WriterInner::Unreachable(_) => unreachable!(),
|
||||
WriterInner::NoColor(_) => false,
|
||||
WriterInner::Ansi(_) => true,
|
||||
#[cfg(windows)]
|
||||
WriterInner::Windows { .. } => true,
|
||||
#[cfg(windows)]
|
||||
WriterInner::WindowsLocked { .. } => true,
|
||||
}
|
||||
}
|
||||
|
||||
fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
|
||||
match *self {
|
||||
WriterInner::Unreachable(_) => unreachable!(),
|
||||
WriterInner::NoColor(ref mut wtr) => wtr.set_color(spec),
|
||||
WriterInner::Ansi(ref mut wtr) => wtr.set_color(spec),
|
||||
#[cfg(windows)]
|
||||
WriterInner::Windows { ref mut wtr, ref console } => {
|
||||
try!(wtr.flush());
|
||||
wtr.flush()?;
|
||||
let mut console = console.lock().unwrap();
|
||||
spec.write_console(&mut *console)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn reset(&mut self) -> io::Result<()> {
|
||||
match *self {
|
||||
WriterInner::NoColor(ref mut wtr) => wtr.reset(),
|
||||
WriterInner::Ansi(ref mut wtr) => wtr.reset(),
|
||||
#[cfg(windows)]
|
||||
WriterInner::WindowsLocked { ref mut wtr, ref mut console } => {
|
||||
try!(wtr.flush());
|
||||
WriterInner::Windows { ref mut wtr, ref mut console } => {
|
||||
wtr.flush()?;
|
||||
console.lock().unwrap().reset()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, W: io::Write> io::Write for WriterInnerLock<'a, W> {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
match *self {
|
||||
WriterInnerLock::Unreachable(_) => unreachable!(),
|
||||
WriterInnerLock::NoColor(ref mut wtr) => wtr.write(buf),
|
||||
WriterInnerLock::Ansi(ref mut wtr) => wtr.write(buf),
|
||||
#[cfg(windows)]
|
||||
WriterInnerLock::Windows { ref mut wtr, .. } => wtr.write(buf),
|
||||
}
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
match *self {
|
||||
WriterInnerLock::Unreachable(_) => unreachable!(),
|
||||
WriterInnerLock::NoColor(ref mut wtr) => wtr.flush(),
|
||||
WriterInnerLock::Ansi(ref mut wtr) => wtr.flush(),
|
||||
#[cfg(windows)]
|
||||
WriterInnerLock::Windows { ref mut wtr, .. } => wtr.flush(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, W: io::Write> WriteColor for WriterInnerLock<'a, W> {
|
||||
fn supports_color(&self) -> bool {
|
||||
match *self {
|
||||
WriterInnerLock::Unreachable(_) => unreachable!(),
|
||||
WriterInnerLock::NoColor(_) => false,
|
||||
WriterInnerLock::Ansi(_) => true,
|
||||
#[cfg(windows)]
|
||||
WriterInnerLock::Windows { .. } => true,
|
||||
}
|
||||
}
|
||||
|
||||
fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
|
||||
match *self {
|
||||
WriterInnerLock::Unreachable(_) => unreachable!(),
|
||||
WriterInnerLock::NoColor(ref mut wtr) => wtr.set_color(spec),
|
||||
WriterInnerLock::Ansi(ref mut wtr) => wtr.set_color(spec),
|
||||
#[cfg(windows)]
|
||||
WriterInnerLock::Windows { ref mut wtr, ref mut console } => {
|
||||
wtr.flush()?;
|
||||
spec.write_console(console)
|
||||
}
|
||||
}
|
||||
@@ -498,19 +558,13 @@ impl<'a, W: io::Write> WriteColor for WriterInner<'a, W> {
|
||||
|
||||
fn reset(&mut self) -> io::Result<()> {
|
||||
match *self {
|
||||
WriterInner::Unreachable(_) => unreachable!(),
|
||||
WriterInner::NoColor(ref mut wtr) => wtr.reset(),
|
||||
WriterInner::Ansi(ref mut wtr) => wtr.reset(),
|
||||
WriterInnerLock::Unreachable(_) => unreachable!(),
|
||||
WriterInnerLock::NoColor(ref mut wtr) => wtr.reset(),
|
||||
WriterInnerLock::Ansi(ref mut wtr) => wtr.reset(),
|
||||
#[cfg(windows)]
|
||||
WriterInner::Windows { ref mut wtr, ref mut console } => {
|
||||
try!(wtr.flush());
|
||||
try!(console.lock().unwrap().reset());
|
||||
Ok(())
|
||||
}
|
||||
#[cfg(windows)]
|
||||
WriterInner::WindowsLocked { ref mut wtr, ref mut console } => {
|
||||
try!(wtr.flush());
|
||||
try!(console.reset());
|
||||
WriterInnerLock::Windows { ref mut wtr, ref mut console } => {
|
||||
wtr.flush()?;
|
||||
console.reset()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -561,17 +615,26 @@ impl BufferWriter {
|
||||
/// the buffers themselves.
|
||||
#[cfg(windows)]
|
||||
fn create(sty: StandardStreamType, choice: ColorChoice) -> BufferWriter {
|
||||
let con = match sty {
|
||||
let mut con = match sty {
|
||||
StandardStreamType::Stdout => wincolor::Console::stdout(),
|
||||
StandardStreamType::Stderr => wincolor::Console::stderr(),
|
||||
}.ok().map(Mutex::new);
|
||||
let stream = LossyStandardStream::new(IoStandardStream::new(sty)).is_console(con.is_some());
|
||||
}.ok();
|
||||
let is_console_virtual = con.as_mut().map(|con| {
|
||||
con.set_virtual_terminal_processing(true).is_ok()
|
||||
}).unwrap_or(false);
|
||||
// If we can enable ANSI on Windows, then we don't need the console
|
||||
// anymore.
|
||||
if is_console_virtual {
|
||||
con = None;
|
||||
}
|
||||
let stream = LossyStandardStream::new(IoStandardStream::new(sty))
|
||||
.is_console(con.is_some());
|
||||
BufferWriter {
|
||||
stream: stream,
|
||||
printed: AtomicBool::new(false),
|
||||
separator: None,
|
||||
color_choice: choice,
|
||||
console: con,
|
||||
console: con.map(Mutex::new),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -637,13 +700,13 @@ impl BufferWriter {
|
||||
let mut stream = self.stream.wrap(self.stream.get_ref().lock());
|
||||
if let Some(ref sep) = self.separator {
|
||||
if self.printed.load(Ordering::SeqCst) {
|
||||
try!(stream.write_all(sep));
|
||||
try!(stream.write_all(b"\n"));
|
||||
stream.write_all(sep)?;
|
||||
stream.write_all(b"\n")?;
|
||||
}
|
||||
}
|
||||
match buf.0 {
|
||||
BufferInner::NoColor(ref b) => try!(stream.write_all(&b.0)),
|
||||
BufferInner::Ansi(ref b) => try!(stream.write_all(&b.0)),
|
||||
BufferInner::NoColor(ref b) => stream.write_all(&b.0)?,
|
||||
BufferInner::Ansi(ref b) => stream.write_all(&b.0)?,
|
||||
#[cfg(windows)]
|
||||
BufferInner::Windows(ref b) => {
|
||||
// We guarantee by construction that we have a console here.
|
||||
@@ -651,7 +714,7 @@ impl BufferWriter {
|
||||
let console_mutex = self.console.as_ref()
|
||||
.expect("got Windows buffer but have no Console");
|
||||
let mut console = console_mutex.lock().unwrap();
|
||||
try!(b.print(&mut *console, &mut stream));
|
||||
b.print(&mut *console, &mut stream)?;
|
||||
}
|
||||
}
|
||||
self.printed.store(true, Ordering::SeqCst);
|
||||
@@ -907,21 +970,24 @@ impl<W: io::Write> WriteColor for Ansi<W> {
|
||||
fn supports_color(&self) -> bool { true }
|
||||
|
||||
fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
|
||||
try!(self.reset());
|
||||
self.reset()?;
|
||||
if spec.bold {
|
||||
self.write_str("\x1B[1m")?;
|
||||
}
|
||||
if spec.underline {
|
||||
self.write_str("\x1B[4m")?;
|
||||
}
|
||||
if let Some(ref c) = spec.fg_color {
|
||||
try!(self.write_color(true, c, spec.intense));
|
||||
self.write_color(true, c, spec.intense)?;
|
||||
}
|
||||
if let Some(ref c) = spec.bg_color {
|
||||
try!(self.write_color(false, c, spec.intense));
|
||||
}
|
||||
if spec.bold {
|
||||
try!(self.write_str("\x1B[1m"));
|
||||
self.write_color(false, c, spec.intense)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn reset(&mut self) -> io::Result<()> {
|
||||
self.write_str("\x1B[m")
|
||||
self.write_str("\x1B[0m")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -954,6 +1020,60 @@ impl<W: io::Write> Ansi<W> {
|
||||
}
|
||||
}
|
||||
}
|
||||
macro_rules! write_var_ansi_code {
|
||||
($pre:expr, $($code:expr),+) => {{
|
||||
// The loop generates at worst a literal of the form
|
||||
// '255,255,255m' which is 12-bytes.
|
||||
// The largest `pre` expression we currently use is 7 bytes.
|
||||
// This gives us the maximum of 19-bytes for our work buffer.
|
||||
let pre_len = $pre.len();
|
||||
assert!(pre_len <= 7);
|
||||
let mut fmt = [0u8; 19];
|
||||
fmt[..pre_len].copy_from_slice($pre);
|
||||
let mut i = pre_len - 1;
|
||||
$(
|
||||
let c1: u8 = ($code / 100) % 10;
|
||||
let c2: u8 = ($code / 10) % 10;
|
||||
let c3: u8 = $code % 10;
|
||||
let mut printed = false;
|
||||
|
||||
if c1 != 0 {
|
||||
printed = true;
|
||||
i += 1;
|
||||
fmt[i] = b'0' + c1;
|
||||
}
|
||||
if c2 != 0 || printed {
|
||||
i += 1;
|
||||
fmt[i] = b'0' + c2;
|
||||
}
|
||||
// If we received a zero value we must still print a value.
|
||||
i += 1;
|
||||
fmt[i] = b'0' + c3;
|
||||
i += 1;
|
||||
fmt[i] = b';';
|
||||
)+
|
||||
|
||||
fmt[i] = b'm';
|
||||
self.write_all(&fmt[0..i+1])
|
||||
}}
|
||||
}
|
||||
macro_rules! write_custom {
|
||||
($ansi256:expr) => {
|
||||
if fg {
|
||||
write_var_ansi_code!(b"\x1B[38;5;", $ansi256)
|
||||
} else {
|
||||
write_var_ansi_code!(b"\x1B[48;5;", $ansi256)
|
||||
}
|
||||
};
|
||||
|
||||
($r:expr, $g:expr, $b:expr) => {{
|
||||
if fg {
|
||||
write_var_ansi_code!(b"\x1B[38;2;", $r, $g, $b)
|
||||
} else {
|
||||
write_var_ansi_code!(b"\x1B[48;2;", $r, $g, $b)
|
||||
}
|
||||
}};
|
||||
}
|
||||
if intense {
|
||||
match *c {
|
||||
Color::Black => write_intense!("8"),
|
||||
@@ -964,6 +1084,8 @@ impl<W: io::Write> Ansi<W> {
|
||||
Color::Magenta => write_intense!("13"),
|
||||
Color::Yellow => write_intense!("11"),
|
||||
Color::White => write_intense!("15"),
|
||||
Color::Ansi256(c) => write_custom!(c),
|
||||
Color::Rgb(r, g, b) => write_custom!(r, g, b),
|
||||
Color::__Nonexhaustive => unreachable!(),
|
||||
}
|
||||
} else {
|
||||
@@ -976,6 +1098,8 @@ impl<W: io::Write> Ansi<W> {
|
||||
Color::Magenta => write_normal!("5"),
|
||||
Color::Yellow => write_normal!("3"),
|
||||
Color::White => write_normal!("7"),
|
||||
Color::Ansi256(c) => write_custom!(c),
|
||||
Color::Rgb(r, g, b) => write_custom!(r, g, b),
|
||||
Color::__Nonexhaustive => unreachable!(),
|
||||
}
|
||||
}
|
||||
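As a reference for the byte sequences the macros above assemble, here is a standalone sketch (not part of the patch) that produces the same output the new unit tests at the bottom of this file assert on. It uses `write!` formatting instead of the fixed 19-byte work buffer, so it trades the allocation-free property for brevity.

```rust
use std::io::{self, Write};

// 24-bit (RGB) foreground: ESC [ 38 ; 2 ; <r> ; <g> ; <b> m
fn write_rgb_fg<W: Write>(mut wtr: W, r: u8, g: u8, b: u8) -> io::Result<()> {
    write!(wtr, "\x1B[38;2;{};{};{}m", r, g, b)
}

// 256-color background: ESC [ 48 ; 5 ; <n> m
fn write_ansi256_bg<W: Write>(mut wtr: W, n: u8) -> io::Result<()> {
    write!(wtr, "\x1B[48;5;{}m", n)
}

fn main() -> io::Result<()> {
    let mut buf = Vec::new();
    write_rgb_fg(&mut buf, 254, 253, 255)?;
    assert_eq!(buf, b"\x1B[38;2;254;253;255m");
    buf.clear();
    write_ansi256_bg(&mut buf, 208)?;
    assert_eq!(buf, b"\x1B[48;5;208m");
    Ok(())
}
```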
@@ -1038,15 +1162,15 @@ impl WindowsBuffer {
|
||||
) -> io::Result<()> {
|
||||
let mut last = 0;
|
||||
for &(pos, ref spec) in &self.colors {
|
||||
try!(stream.write_all(&self.buf[last..pos]));
|
||||
try!(stream.flush());
|
||||
stream.write_all(&self.buf[last..pos])?;
|
||||
stream.flush()?;
|
||||
last = pos;
|
||||
match *spec {
|
||||
None => try!(console.reset()),
|
||||
Some(ref spec) => try!(spec.write_console(console)),
|
||||
None => console.reset()?,
|
||||
Some(ref spec) => spec.write_console(console)?,
|
||||
}
|
||||
}
|
||||
try!(stream.write_all(&self.buf[last..]));
|
||||
stream.write_all(&self.buf[last..])?;
|
||||
stream.flush()
|
||||
}
|
||||
|
||||
@@ -1091,6 +1215,7 @@ pub struct ColorSpec {
|
||||
bg_color: Option<Color>,
|
||||
bold: bool,
|
||||
intense: bool,
|
||||
underline: bool,
|
||||
}
|
||||
|
||||
impl ColorSpec {
|
||||
@@ -1130,10 +1255,37 @@ impl ColorSpec {
|
||||
self
|
||||
}
|
||||
|
||||
/// Get whether this is underline or not.
|
||||
///
|
||||
/// Note that the underline setting has no effect in a Windows console.
|
||||
pub fn underline(&self) -> bool { self.underline }
|
||||
|
||||
/// Set whether the text is underlined or not.
|
||||
///
|
||||
/// Note that the underline setting has no effect in a Windows console.
|
||||
pub fn set_underline(&mut self, yes: bool) -> &mut ColorSpec {
|
||||
self.underline = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Get whether this is intense or not.
|
||||
///
|
||||
/// On Unix-like systems, this will output the ANSI escape sequence
|
||||
/// that will print a high-intensity version of the color
|
||||
/// specified.
|
||||
///
|
||||
/// On Windows systems, this will output the ANSI escape sequence
|
||||
/// that will print a brighter version of the color specified.
|
||||
pub fn intense(&self) -> bool { self.intense }
|
||||
|
||||
/// Set whether the text is intense or not.
|
||||
///
|
||||
/// On Unix-like systems, this will output the ANSI escape sequence
|
||||
/// that will print a high-intensity version of the color
|
||||
/// specified.
|
||||
///
|
||||
/// On Windows systems, this will output the ANSI escape sequence
|
||||
/// that will print a brighter version of the color specified.
|
||||
pub fn set_intense(&mut self, yes: bool) -> &mut ColorSpec {
|
||||
self.intense = yes;
|
||||
self
|
||||
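Not part of the patch: a quick usage sketch of the new `set_underline` setter together with `set_intense`, assuming the termcolor 0.3 API shown in this diff. Per the doc comments above, the underline setting has no effect in a Windows console.

```rust
use std::io::Write;
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};

fn main() -> std::io::Result<()> {
    let mut stdout = StandardStream::stdout(ColorChoice::Always);
    let mut spec = ColorSpec::new();
    // The setters return &mut ColorSpec, so they chain.
    spec.set_fg(Some(Color::Red)).set_underline(true).set_intense(true);
    stdout.set_color(&spec)?;
    writeln!(&mut stdout, "underlined, high-intensity red")?;
    stdout.reset()
}
```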
@@ -1141,7 +1293,8 @@ impl ColorSpec {
|
||||
|
||||
/// Returns true if this color specification has no colors or styles.
|
||||
pub fn is_none(&self) -> bool {
|
||||
self.fg_color.is_none() && self.bg_color.is_none() && !self.bold
|
||||
self.fg_color.is_none() && self.bg_color.is_none()
|
||||
&& !self.bold && !self.underline
|
||||
}
|
||||
|
||||
/// Clears this color specification so that it has no color/style settings.
|
||||
@@ -1149,6 +1302,7 @@ impl ColorSpec {
|
||||
self.fg_color = None;
|
||||
self.bg_color = None;
|
||||
self.bold = false;
|
||||
self.underline = false;
|
||||
}
|
||||
|
||||
/// Writes this color spec to the given Windows console.
|
||||
@@ -1160,19 +1314,41 @@ impl ColorSpec {
|
||||
use wincolor::Intense;
|
||||
|
||||
let intense = if self.intense { Intense::Yes } else { Intense::No };
|
||||
if let Some(color) = self.fg_color.as_ref().map(|c| c.to_windows()) {
|
||||
try!(console.fg(intense, color));
|
||||
|
||||
let fg_color = self.fg_color.as_ref().and_then(|c| c.to_windows());
|
||||
if let Some(color) = fg_color {
|
||||
console.fg(intense, color)?;
|
||||
}
|
||||
if let Some(color) = self.bg_color.as_ref().map(|c| c.to_windows()) {
|
||||
try!(console.bg(intense, color));
|
||||
|
||||
let bg_color = self.bg_color.as_ref().and_then(|c| c.to_windows());
|
||||
if let Some(color) = bg_color {
|
||||
console.bg(intense, color)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// The set of available English colors for the terminal foreground/background.
|
||||
/// The set of available colors for the terminal foreground/background.
|
||||
///
|
||||
/// Note that this set may expand over time.
|
||||
/// The `Ansi256` and `Rgb` colors will only output the correct codes when
|
||||
/// paired with the `Ansi` `WriteColor` implementation.
|
||||
///
|
||||
/// The `Ansi256` and `Rgb` color types are not supported when writing colors
|
||||
/// on Windows using the console. If they are used on Windows, then they are
|
||||
/// silently ignored and no colors will be emitted.
|
||||
///
|
||||
/// This set may expand over time.
|
||||
///
|
||||
/// This type has a `FromStr` impl that can parse colors from their human
|
||||
/// readable form. The format is as follows:
|
||||
///
|
||||
/// 1. Any of the explicitly listed colors in English. They are matched
|
||||
/// case insensitively.
|
||||
/// 2. A single 8-bit integer, in either decimal or hexadecimal format.
|
||||
/// 3. A triple of 8-bit integers separated by a comma, where each integer is
|
||||
/// in decimal or hexadecimal format.
|
||||
///
|
||||
/// Hexadecimal numbers are written with a `0x` prefix.
|
||||
#[allow(missing_docs)]
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum Color {
|
||||
@@ -1184,46 +1360,145 @@ pub enum Color {
|
||||
Magenta,
|
||||
Yellow,
|
||||
White,
|
||||
Ansi256(u8),
|
||||
Rgb(u8, u8, u8),
|
||||
#[doc(hidden)]
|
||||
__Nonexhaustive,
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
impl Color {
|
||||
/// Translate this color to a wincolor::Color.
|
||||
fn to_windows(&self) -> wincolor::Color {
|
||||
#[cfg(windows)]
|
||||
fn to_windows(&self) -> Option<wincolor::Color> {
|
||||
match *self {
|
||||
Color::Black => wincolor::Color::Black,
|
||||
Color::Blue => wincolor::Color::Blue,
|
||||
Color::Green => wincolor::Color::Green,
|
||||
Color::Red => wincolor::Color::Red,
|
||||
Color::Cyan => wincolor::Color::Cyan,
|
||||
Color::Magenta => wincolor::Color::Magenta,
|
||||
Color::Yellow => wincolor::Color::Yellow,
|
||||
Color::White => wincolor::Color::White,
|
||||
Color::Black => Some(wincolor::Color::Black),
|
||||
Color::Blue => Some(wincolor::Color::Blue),
|
||||
Color::Green => Some(wincolor::Color::Green),
|
||||
Color::Red => Some(wincolor::Color::Red),
|
||||
Color::Cyan => Some(wincolor::Color::Cyan),
|
||||
Color::Magenta => Some(wincolor::Color::Magenta),
|
||||
Color::Yellow => Some(wincolor::Color::Yellow),
|
||||
Color::White => Some(wincolor::Color::White),
|
||||
Color::Ansi256(_) => None,
|
||||
Color::Rgb(_, _, _) => None,
|
||||
Color::__Nonexhaustive => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Parses a numeric color string, either ANSI or RGB.
|
||||
fn from_str_numeric(s: &str) -> Result<Color, ParseColorError> {
|
||||
// The "ansi256" format is a single number (decimal or hex)
|
||||
// corresponding to one of 256 colors.
|
||||
//
|
||||
// The "rgb" format is a triple of numbers (decimal or hex) delimited
|
||||
// by a comma corresponding to one of 256^3 colors.
|
||||
|
||||
fn parse_number(s: &str) -> Option<u8> {
|
||||
use std::u8;
|
||||
|
||||
if s.starts_with("0x") {
|
||||
u8::from_str_radix(&s[2..], 16).ok()
|
||||
} else {
|
||||
u8::from_str_radix(s, 10).ok()
|
||||
}
|
||||
}
|
||||
|
||||
let codes: Vec<&str> = s.split(',').collect();
|
||||
if codes.len() == 1 {
|
||||
if let Some(n) = parse_number(&codes[0]) {
|
||||
Ok(Color::Ansi256(n))
|
||||
} else {
|
||||
if s.chars().all(|c| c.is_digit(16)) {
|
||||
Err(ParseColorError {
|
||||
kind: ParseColorErrorKind::InvalidAnsi256,
|
||||
given: s.to_string(),
|
||||
})
|
||||
} else {
|
||||
Err(ParseColorError {
|
||||
kind: ParseColorErrorKind::InvalidName,
|
||||
given: s.to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
} else if codes.len() == 3 {
|
||||
let mut v = vec![];
|
||||
for code in codes {
|
||||
let n = parse_number(code).ok_or_else(|| {
|
||||
ParseColorError {
|
||||
kind: ParseColorErrorKind::InvalidRgb,
|
||||
given: s.to_string(),
|
||||
}
|
||||
})?;
|
||||
v.push(n);
|
||||
}
|
||||
Ok(Color::Rgb(v[0], v[1], v[2]))
|
||||
} else {
|
||||
Err(if s.contains(",") {
|
||||
ParseColorError {
|
||||
kind: ParseColorErrorKind::InvalidRgb,
|
||||
given: s.to_string(),
|
||||
}
|
||||
} else {
|
||||
ParseColorError {
|
||||
kind: ParseColorErrorKind::InvalidName,
|
||||
given: s.to_string(),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// An error from parsing an invalid color name.
|
||||
/// An error from parsing an invalid color specification.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct ParseColorError(String);
|
||||
pub struct ParseColorError {
|
||||
kind: ParseColorErrorKind,
|
||||
given: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
enum ParseColorErrorKind {
|
||||
InvalidName,
|
||||
InvalidAnsi256,
|
||||
InvalidRgb,
|
||||
}
|
||||
|
||||
impl ParseColorError {
|
||||
/// Return the string that couldn't be parsed as a valid color.
|
||||
pub fn invalid(&self) -> &str { &self.0 }
|
||||
pub fn invalid(&self) -> &str { &self.given }
|
||||
}
|
||||
|
||||
impl error::Error for ParseColorError {
|
||||
fn description(&self) -> &str { "unrecognized color name" }
|
||||
fn description(&self) -> &str {
|
||||
use self::ParseColorErrorKind::*;
|
||||
match self.kind {
|
||||
InvalidName => "unrecognized color name",
|
||||
InvalidAnsi256 => "invalid ansi256 color number",
|
||||
InvalidRgb => "invalid RGB color triple",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ParseColorError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "Unrecognized color name '{}'. Choose from: \
|
||||
black, blue, green, red, cyan, magenta, yellow, white.",
|
||||
self.0)
|
||||
use self::ParseColorErrorKind::*;
|
||||
match self.kind {
|
||||
InvalidName => {
|
||||
write!(f, "unrecognized color name '{}'. Choose from: \
|
||||
black, blue, green, red, cyan, magenta, yellow, \
|
||||
white",
|
||||
self.given)
|
||||
}
|
||||
InvalidAnsi256 => {
|
||||
write!(f, "unrecognized ansi256 color number, \
|
||||
should be '[0-255]' (or a hex number), but is '{}'",
|
||||
self.given)
|
||||
}
|
||||
InvalidRgb => {
|
||||
write!(f, "unrecognized RGB color triple, \
|
||||
should be '[0-255],[0-255],[0-255]' (or a hex \
|
||||
triple), but is '{}'", self.given)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1240,7 +1515,7 @@ impl FromStr for Color {
|
||||
"magenta" => Ok(Color::Magenta),
|
||||
"yellow" => Ok(Color::Yellow),
|
||||
"white" => Ok(Color::White),
|
||||
_ => Err(ParseColorError(s.to_string())),
|
||||
_ => Color::from_str_numeric(s),
|
||||
}
|
||||
}
|
||||
}
|
||||
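Not part of the patch: the three parse forms described in the `Color` docs above, mirroring the unit tests added at the end of this file.

```rust
use termcolor::Color;

fn main() {
    // 1. A named color, matched case insensitively.
    assert_eq!("green".parse::<Color>(), Ok(Color::Green));
    // 2. A single 8-bit number (decimal or 0x-prefixed hex) as an ANSI-256 color.
    assert_eq!("0xFF".parse::<Color>(), Ok(Color::Ansi256(0xFF)));
    // 3. A comma-separated triple of 8-bit numbers as an RGB color.
    assert_eq!("0,128,255".parse::<Color>(), Ok(Color::Rgb(0, 128, 255)));
}
```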
@@ -1253,7 +1528,9 @@ struct LossyStandardStream<W> {
|
||||
|
||||
impl<W: io::Write> LossyStandardStream<W> {
|
||||
#[cfg(not(windows))]
|
||||
fn new(wtr: W) -> LossyStandardStream<W> { LossyStandardStream { wtr: wtr } }
|
||||
fn new(wtr: W) -> LossyStandardStream<W> {
|
||||
LossyStandardStream { wtr: wtr }
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn new(wtr: W) -> LossyStandardStream<W> {
|
||||
@@ -1314,9 +1591,107 @@ fn write_lossy_utf8<W: io::Write>(mut w: W, buf: &[u8]) -> io::Result<usize> {
|
||||
match ::std::str::from_utf8(buf) {
|
||||
Ok(s) => w.write(s.as_bytes()),
|
||||
Err(ref e) if e.valid_up_to() == 0 => {
|
||||
try!(w.write(b"\xEF\xBF\xBD"));
|
||||
w.write(b"\xEF\xBF\xBD")?;
|
||||
Ok(1)
|
||||
}
|
||||
Err(e) => w.write(&buf[..e.valid_up_to()]),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{
|
||||
Ansi, Color, ParseColorError, ParseColorErrorKind, StandardStream,
|
||||
};
|
||||
|
||||
fn assert_is_send<T: Send>() {}
|
||||
|
||||
#[test]
|
||||
fn standard_stream_is_send() {
|
||||
assert_is_send::<StandardStream>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_parse_ok() {
|
||||
let color = "green".parse::<Color>();
|
||||
assert_eq!(color, Ok(Color::Green));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_256_parse_ok() {
|
||||
let color = "7".parse::<Color>();
|
||||
assert_eq!(color, Ok(Color::Ansi256(7)));
|
||||
|
||||
let color = "32".parse::<Color>();
|
||||
assert_eq!(color, Ok(Color::Ansi256(32)));
|
||||
|
||||
let color = "0xFF".parse::<Color>();
|
||||
assert_eq!(color, Ok(Color::Ansi256(0xFF)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_256_parse_err_out_of_range() {
|
||||
let color = "256".parse::<Color>();
|
||||
assert_eq!(color, Err(ParseColorError {
|
||||
kind: ParseColorErrorKind::InvalidAnsi256,
|
||||
given: "256".to_string(),
|
||||
}));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rgb_parse_ok() {
|
||||
let color = "0,0,0".parse::<Color>();
|
||||
assert_eq!(color, Ok(Color::Rgb(0, 0, 0)));
|
||||
|
||||
let color = "0,128,255".parse::<Color>();
|
||||
assert_eq!(color, Ok(Color::Rgb(0, 128, 255)));
|
||||
|
||||
let color = "0x0,0x0,0x0".parse::<Color>();
|
||||
assert_eq!(color, Ok(Color::Rgb(0, 0, 0)));
|
||||
|
||||
let color = "0x33,0x66,0xFF".parse::<Color>();
|
||||
assert_eq!(color, Ok(Color::Rgb(0x33, 0x66, 0xFF)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rgb_parse_err_out_of_range() {
|
||||
let color = "0,0,256".parse::<Color>();
|
||||
assert_eq!(color, Err(ParseColorError {
|
||||
kind: ParseColorErrorKind::InvalidRgb,
|
||||
given: "0,0,256".to_string(),
|
||||
}));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rgb_parse_err_bad_format() {
|
||||
let color = "0,0".parse::<Color>();
|
||||
assert_eq!(color, Err(ParseColorError {
|
||||
kind: ParseColorErrorKind::InvalidRgb,
|
||||
given: "0,0".to_string(),
|
||||
}));
|
||||
|
||||
let color = "not_a_color".parse::<Color>();
|
||||
assert_eq!(color, Err(ParseColorError {
|
||||
kind: ParseColorErrorKind::InvalidName,
|
||||
given: "not_a_color".to_string(),
|
||||
}));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_var_ansi_write_rgb() {
|
||||
let mut buf = Ansi::new(vec![]);
|
||||
let _ = buf.write_color(true, &Color::Rgb(254, 253, 255), false);
|
||||
assert_eq!(buf.0, b"\x1B[38;2;254;253;255m");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_var_ansi_write_256() {
|
||||
let mut buf = Ansi::new(vec![]);
|
||||
let _ = buf.write_color(false, &Color::Ansi256(7), false);
|
||||
assert_eq!(buf.0, b"\x1B[48;5;7m");
|
||||
|
||||
let mut buf = Ansi::new(vec![]);
|
||||
let _ = buf.write_color(false, &Color::Ansi256(208), false);
|
||||
assert_eq!(buf.0, b"\x1B[48;5;208m");
|
||||
}
|
||||
}
|
||||
|
||||
BIN
tests/data/sherlock.bz2
Normal file
Binary file not shown.
BIN
tests/data/sherlock.gz
Normal file
Binary file not shown.
BIN
tests/data/sherlock.lzma
Normal file
Binary file not shown.
BIN
tests/data/sherlock.xz
Normal file
Binary file not shown.
663
tests/tests.rs
@@ -62,7 +62,7 @@ fn paths(unix: &[&str]) -> Vec<String> {
|
||||
|
||||
fn paths_from_stdout(stdout: String) -> Vec<String> {
|
||||
let mut paths: Vec<_> = stdout.lines().map(|s| {
|
||||
s.split(":").next().unwrap().to_string()
|
||||
s.split(':').next().unwrap().to_string()
|
||||
}).collect();
|
||||
paths.sort();
|
||||
paths
|
||||
@@ -75,6 +75,10 @@ fn sort_lines(lines: &str) -> String {
|
||||
format!("{}\n", lines.join("\n"))
|
||||
}
|
||||
|
||||
fn cmd_exists(name: &str) -> bool {
|
||||
Command::new(name).arg("--help").output().is_ok()
|
||||
}
|
||||
|
||||
sherlock!(single_file, |wd: WorkDir, mut cmd| {
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
@@ -103,6 +107,22 @@ sherlock!(line_numbers, |wd: WorkDir, mut cmd: Command| {
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(line_number_width, |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("-n");
|
||||
cmd.arg("--line-number-width").arg("2");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = " 1:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
3:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(line_number_width_padding_character_error, |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("-n");
|
||||
cmd.arg("--line-number-width").arg("02");
|
||||
wd.assert_non_empty_stderr(&mut cmd);
|
||||
});
|
||||
|
||||
sherlock!(columns, |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--column");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
@@ -125,7 +145,7 @@ sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
|
||||
sherlock!(with_heading, |wd: WorkDir, mut cmd: Command| {
|
||||
// This forces the issue since --with-filename is disabled by default
|
||||
// when searching one fil.e
|
||||
// when searching one file.
|
||||
cmd.arg("--with-filename").arg("--heading");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
@@ -209,6 +229,16 @@ For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(line, "Watson|and exhibited clearly, with a label attached.",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("-x");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
and exhibited clearly, with a label attached.
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(literal, "()", "file", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create("file", "blib\n()\nblab\n");
|
||||
cmd.arg("-F");
|
||||
@@ -256,6 +286,20 @@ but Watson, Doctor has to have it taken out for him and dusted,
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(replace_with_only_matching, "of (\\w+)",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("-o").arg("-r").arg("$1");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
this
|
||||
detective
|
||||
luck
|
||||
straw
|
||||
cigar
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(file_types, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create("file.py", "Sherlock");
|
||||
wd.create("file.rs", "Sherlock");
|
||||
@@ -336,6 +380,31 @@ sherlock!(glob_negate, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
assert_eq!(lines, "file.py:Sherlock\n");
|
||||
});
|
||||
|
||||
sherlock!(iglob, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create("file.HTML", "Sherlock");
|
||||
cmd.arg("--iglob").arg("*.html");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines, "file.HTML:Sherlock\n");
|
||||
});
|
||||
|
||||
sherlock!(csglob, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create("file1.HTML", "Sherlock");
|
||||
wd.create("file2.html", "Sherlock");
|
||||
cmd.arg("--glob").arg("*.html");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines, "file2.html:Sherlock\n");
|
||||
});
|
||||
|
||||
sherlock!(byte_offset_only_matching, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("-b").arg("-o");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
sherlock:56:Sherlock
|
||||
sherlock:177:Sherlock
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(count, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--count");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
@@ -343,6 +412,27 @@ sherlock!(count, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(count_matches, "the", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--count-matches");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "sherlock:4\n";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(count_matches_inverted, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--count-matches").arg("--invert-match");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "sherlock:4\n";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(count_matches_via_only, "the", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--count").arg("--only-matching");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "sherlock:4\n";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(files_with_matches, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--files-with-matches");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
@@ -455,7 +545,6 @@ sherlock!(max_filesize_parse_no_suffix, "Sherlock", ".",
|
||||
let expected = "\
|
||||
foo
|
||||
";
|
||||
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
@@ -470,7 +559,6 @@ sherlock!(max_filesize_parse_k_suffix, "Sherlock", ".",
|
||||
let expected = "\
|
||||
foo
|
||||
";
|
||||
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
@@ -485,10 +573,19 @@ sherlock!(max_filesize_parse_m_suffix, "Sherlock", ".",
|
||||
let expected = "\
|
||||
foo
|
||||
";
|
||||
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(max_filesize_suffix_overflow, "Sherlock", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
wd.remove("sherlock");
|
||||
wd.create_size("foo", 1000000);
|
||||
|
||||
// 2^35 * 2^30 would otherwise overflow
|
||||
cmd.arg("--max-filesize").arg("34359738368G").arg("--files");
|
||||
wd.assert_err(&mut cmd);
|
||||
});
|
||||
|
||||
sherlock!(ignore_hidden, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.remove("sherlock");
|
||||
wd.create(".sherlock", hay::SHERLOCK);
|
||||
@@ -734,11 +831,7 @@ clean!(regression_25, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/30
|
||||
clean!(regression_30, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
if cfg!(windows) {
|
||||
wd.create(".gitignore", "vendor/**\n!vendor\\manifest");
|
||||
} else {
|
||||
wd.create(".gitignore", "vendor/**\n!vendor/manifest");
|
||||
}
|
||||
wd.create_dir("vendor");
|
||||
wd.create("vendor/manifest", "test");
|
||||
|
||||
@@ -982,12 +1075,15 @@ fn regression_210() {
|
||||
let badutf8 = OsStr::from_bytes(&b"foo\xffbar"[..]);
|
||||
|
||||
let wd = WorkDir::new("regression_210");
|
||||
// APFS does not support creating files with invalid UTF-8 bytes.
|
||||
// https://github.com/BurntSushi/ripgrep/issues/559
|
||||
if wd.try_create(badutf8, "test").is_ok() {
|
||||
let mut cmd = wd.command();
|
||||
wd.create(badutf8, "test");
|
||||
cmd.arg("-H").arg("test").arg(badutf8);
|
||||
|
||||
let out = wd.output(&mut cmd);
|
||||
assert_eq!(out.stdout, b"foo\xffbar:test\n".to_vec());
|
||||
}
|
||||
}
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/228
|
||||
@@ -1057,6 +1153,126 @@ clean!(regression_405, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
assert_eq!(lines, format!("{}:test\n", path("bar/foo/file2.txt")));
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/428
|
||||
#[cfg(not(windows))]
|
||||
clean!(regression_428_color_context_path, "foo", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
wd.create("sherlock", "foo\nbar");
|
||||
cmd.arg("-A1").arg("-H").arg("--no-heading").arg("-N")
|
||||
.arg("--colors=match:none").arg("--color=always");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = format!(
|
||||
"{colored_path}:foo\n{colored_path}-bar\n",
|
||||
colored_path=format!(
|
||||
"\x1b\x5b\x30\x6d\x1b\x5b\x33\x35\x6d{path}\x1b\x5b\x30\x6d",
|
||||
path=path("sherlock")));
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/428
|
||||
clean!(regression_428_unrecognized_style, "Sherlok", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--colors=match:style:");
|
||||
wd.assert_err(&mut cmd);
|
||||
|
||||
let output = cmd.output().unwrap();
|
||||
let err = String::from_utf8_lossy(&output.stderr);
|
||||
let expected = "\
|
||||
Unrecognized style attribute ''. Choose from: nobold, bold, nointense, intense, \
|
||||
nounderline, underline.
|
||||
";
|
||||
assert_eq!(err, expected);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/493
|
||||
clean!(regression_493, " 're ", "input.txt", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create("input.txt", "peshwaship 're seminomata");
|
||||
cmd.arg("-o").arg("-w");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines, " 're \n");
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/553
|
||||
sherlock!(regression_553_switch, "sherlock", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("-i");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
|
||||
// This repeats the `-i` flag.
|
||||
cmd.arg("-i");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(regression_553_flag, "world|attached",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("-C").arg("1");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
Holmeses, success in the province of detective work must always
|
||||
--
|
||||
but Doctor Watson has to have it taken out for him and dusted,
|
||||
and exhibited clearly, with a label attached.
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
|
||||
cmd.arg("-C").arg("0");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
and exhibited clearly, with a label attached.
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/599
|
||||
clean!(regression_599, "^$", "input.txt", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create("input.txt", "\n\ntest\n");
|
||||
cmd.args(&[
|
||||
"--color", "ansi",
|
||||
"--colors", "path:none",
|
||||
"--colors", "line:none",
|
||||
"--colors", "match:fg:red",
|
||||
"--colors", "match:style:nobold",
|
||||
"--line-number",
|
||||
]);
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
// Technically, the expected output should only be two lines, but:
|
||||
// https://github.com/BurntSushi/ripgrep/issues/441
|
||||
let expected = "\
|
||||
[0m1[0m:[0m[31m[0m
|
||||
[0m2[0m:[0m[31m[0m
|
||||
[0m4[0m:
|
||||
";
|
||||
assert_eq!(expected, lines);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/807
|
||||
clean!(regression_807, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create(".gitignore", ".a/b");
|
||||
wd.create_dir(".a/b");
|
||||
wd.create_dir(".a/c");
|
||||
wd.create(".a/b/file", "test");
|
||||
wd.create(".a/c/file", "test");
|
||||
|
||||
cmd.arg("--hidden");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines, format!("{}:test\n", path(".a/c/file")));
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1
|
||||
clean!(feature_1_sjis, "Шерлок Холмс", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
let sherlock =
|
||||
@@ -1103,6 +1319,21 @@ clean!(feature_1_eucjp, "Шерлок Холмс", ".",
|
||||
assert_eq!(lines, "foo:Шерлок Холмс\n");
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1
|
||||
sherlock!(feature_1_unknown_encoding, "Sherlock", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("-Efoobar");
|
||||
wd.assert_non_empty_stderr(&mut cmd);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1
|
||||
// Specific: https://github.com/BurntSushi/ripgrep/pull/398/files#r111109265
|
||||
sherlock!(feature_1_replacement_encoding, "Sherlock", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("-Ecsiso2022kr");
|
||||
wd.assert_non_empty_stderr(&mut cmd);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/7
|
||||
sherlock!(feature_7, "-fpat", "sherlock", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create("pat", "Sherlock\nHolmes");
|
||||
@@ -1139,6 +1370,32 @@ be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/34
|
||||
sherlock!(feature_34_only_matching, "Sherlock", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--only-matching");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
sherlock:Sherlock
|
||||
sherlock:Sherlock
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/34
|
||||
sherlock!(feature_34_only_matching_line_column, "Sherlock", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--only-matching").arg("--column").arg("--line-number");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
sherlock:1:57:Sherlock
|
||||
sherlock:3:49:Sherlock
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/45
|
||||
sherlock!(feature_45_relative_cwd, "test", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
@@ -1390,6 +1647,259 @@ clean!(feature_275_pathsep, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
assert_eq!(lines, "fooZbar:test\n");
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/362
|
||||
sherlock!(feature_362_dfa_size_limit, r"For\s",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
// This should fall back to the nfa engine but should still produce the
|
||||
// expected result.
|
||||
cmd.arg("--dfa-size-limit").arg("10");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(feature_362_exceeds_regex_size_limit, r"[0-9]\w+",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--regex-size-limit").arg("10K");
|
||||
wd.assert_err(&mut cmd);
|
||||
});
|
||||
|
||||
#[cfg(target_pointer_width = "32")]
|
||||
sherlock!(feature_362_u64_to_narrow_usize_suffix_overflow, "Sherlock", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
wd.remove("sherlock");
|
||||
wd.create_size("foo", 1000000);
|
||||
|
||||
// 2^35 * 2^20 is ok for u64, but not for usize
|
||||
cmd.arg("--dfa-size-limit").arg("34359738368M").arg("--files");
|
||||
wd.assert_err(&mut cmd);
|
||||
});
|
||||
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/419
|
||||
sherlock!(feature_419_zero_as_shortcut_for_null, "Sherlock", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("-0").arg("--count");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines, "sherlock\x002\n");
|
||||
});
|
||||
|
||||
#[test]
|
||||
fn compressed_gzip() {
|
||||
if !cmd_exists("gzip") {
|
||||
return;
|
||||
}
|
||||
let gzip_file = include_bytes!("./data/sherlock.gz");
|
||||
|
||||
let wd = WorkDir::new("feature_search_compressed");
|
||||
wd.create_bytes("sherlock.gz", gzip_file);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("-z").arg("Sherlock").arg("sherlock.gz");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compressed_bzip2() {
|
||||
if !cmd_exists("bzip2") {
|
||||
return;
|
||||
}
|
||||
let bzip2_file = include_bytes!("./data/sherlock.bz2");
|
||||
|
||||
let wd = WorkDir::new("feature_search_compressed");
|
||||
wd.create_bytes("sherlock.bz2", bzip2_file);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("-z").arg("Sherlock").arg("sherlock.bz2");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compressed_xz() {
|
||||
if !cmd_exists("xz") {
|
||||
return;
|
||||
}
|
||||
let xz_file = include_bytes!("./data/sherlock.xz");
|
||||
|
||||
let wd = WorkDir::new("feature_search_compressed");
|
||||
wd.create_bytes("sherlock.xz", xz_file);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("-z").arg("Sherlock").arg("sherlock.xz");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compressed_lzma() {
|
||||
if !cmd_exists("xz") {
|
||||
return;
|
||||
}
|
||||
let lzma_file = include_bytes!("./data/sherlock.lzma");
|
||||
|
||||
let wd = WorkDir::new("feature_search_compressed");
|
||||
wd.create_bytes("sherlock.lzma", lzma_file);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("-z").arg("Sherlock").arg("sherlock.lzma");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compressed_failing_gzip() {
|
||||
if !cmd_exists("gzip") {
|
||||
return;
|
||||
}
|
||||
let wd = WorkDir::new("feature_search_compressed");
|
||||
wd.create("sherlock.gz", hay::SHERLOCK);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("-z").arg("Sherlock").arg("sherlock.gz");
|
||||
|
||||
wd.assert_non_empty_stderr(&mut cmd);
|
||||
|
||||
let output = cmd.output().unwrap();
|
||||
let err = String::from_utf8_lossy(&output.stderr);
|
||||
assert_eq!(err.contains("not in gzip format"), true);
|
||||
}
|
||||
|
||||
sherlock!(feature_196_persistent_config, "sherlock",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
// Make sure we get no matches by default.
|
||||
wd.assert_err(&mut cmd);
|
||||
|
||||
// Now add our config file, and make sure it impacts ripgrep.
|
||||
wd.create(".ripgreprc", "--ignore-case");
|
||||
cmd.env("RIPGREP_CONFIG_PATH", ".ripgreprc");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(feature_411_single_threaded_search_stats,
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--stats");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines.contains("2 matched lines"), true);
|
||||
assert_eq!(lines.contains("1 files contained matches"), true);
|
||||
assert_eq!(lines.contains("1 files searched"), true);
|
||||
assert_eq!(lines.contains("seconds"), true);
|
||||
});
|
||||
|
||||
#[test]
|
||||
fn feature_411_parallel_search_stats() {
|
||||
let wd = WorkDir::new("feature_411");
|
||||
wd.create("sherlock_1", hay::SHERLOCK);
|
||||
wd.create("sherlock_2", hay::SHERLOCK);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("--stats");
|
||||
cmd.arg("Sherlock");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines.contains("4 matched lines"), true);
|
||||
assert_eq!(lines.contains("2 files contained matches"), true);
|
||||
assert_eq!(lines.contains("2 files searched"), true);
|
||||
assert_eq!(lines.contains("seconds"), true);
|
||||
}
|
||||
|
||||
sherlock!(feature_411_ignore_stats_1, |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--files-with-matches");
|
||||
cmd.arg("--stats");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines.contains("seconds"), false);
|
||||
});
|
||||
|
||||
sherlock!(feature_411_ignore_stats_2, |wd: WorkDir, mut cmd: Command| {
    cmd.arg("--files-without-match");
    cmd.arg("--stats");

    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines.contains("seconds"), false);
});

#[test]
fn feature_740_passthru() {
    let wd = WorkDir::new("feature_740");
    wd.create("file", "\nfoo\nbar\nfoobar\n\nbaz\n");
    wd.create("patterns", "foo\n\nbar\n");

    // We can't assume that the way colour specs are translated to ANSI
    // sequences will remain stable, and --replace doesn't currently work with
    // pass-through, so for now we don't actually test the match sub-strings
    let common_args = &["-n", "--passthru"];
    let expected = "\
1:
2:foo
3:bar
4:foobar
5:
6:baz
";

    // With single pattern
    let mut cmd = wd.command();
    cmd.args(common_args).arg("foo").arg("file");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, expected);

    // With multiple -e patterns
    let mut cmd = wd.command();
    cmd.args(common_args)
        .arg("-e").arg("foo").arg("-e").arg("bar").arg("file");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, expected);

    // With multiple -f patterns
    let mut cmd = wd.command();
    cmd.args(common_args).arg("-f").arg("patterns").arg("file");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, expected);

    // -c should override
    let mut cmd = wd.command();
    cmd.args(common_args).arg("-c").arg("foo").arg("file");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "2\n");

    // -o should conflict
    let mut cmd = wd.command();
    cmd.args(common_args).arg("-o").arg("foo").arg("file");
    wd.assert_err(&mut cmd);

    // -r should conflict
    let mut cmd = wd.command();
    cmd.args(common_args).arg("-r").arg("$0").arg("foo").arg("file");
    wd.assert_err(&mut cmd);
}

#[test]
fn binary_nosearch() {
    let wd = WorkDir::new("binary_nosearch");

@@ -1492,6 +2002,139 @@ fn regression_391() {
assert_eq!(lines, "bar.py\n");
|
||||
}
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/451
|
||||
#[test]
|
||||
fn regression_451_only_matching_as_in_issue() {
|
||||
let wd = WorkDir::new("regression_451_only_matching");
|
||||
let path = "digits.txt";
|
||||
wd.create(path, "1 2 3\n");
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("[0-9]+").arg(path).arg("--only-matching");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
|
||||
let expected = "\
|
||||
1
|
||||
2
|
||||
3
|
||||
";
|
||||
|
||||
assert_eq!(lines, expected);
|
||||
}
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/451
#[test]
fn regression_451_only_matching() {
    let wd = WorkDir::new("regression_451_only_matching");
    let path = "digits.txt";
    wd.create(path, "1 2 3\n123\n");

    let mut cmd = wd.command();
    cmd.arg("[0-9]").arg(path)
        .arg("--only-matching")
        .arg("--column");
    let lines: String = wd.stdout(&mut cmd);

    let expected = "\
1:1:1
1:3:2
1:5:3
2:1:1
2:2:2
2:3:3
";

    assert_eq!(lines, expected);
}

// See: https://github.com/BurntSushi/ripgrep/issues/483
#[test]
fn regression_483_matching_no_stdout() {
    let wd = WorkDir::new("regression_483_matching_no_stdout");
    wd.create("file.py", "");

    let mut cmd = wd.command();
    cmd.arg("--quiet")
        .arg("--files")
        .arg("--glob").arg("*.py");

    let lines: String = wd.stdout(&mut cmd);
    assert!(lines.is_empty());
}

// See: https://github.com/BurntSushi/ripgrep/issues/483
#[test]
fn regression_483_non_matching_exit_code() {
    let wd = WorkDir::new("regression_483_non_matching_exit_code");
    wd.create("file.rs", "");

    let mut cmd = wd.command();
    cmd.arg("--quiet")
        .arg("--files")
        .arg("--glob").arg("*.py");

    wd.assert_err(&mut cmd);
}

// See: https://github.com/BurntSushi/ripgrep/issues/506
#[test]
fn regression_506_word_boundaries_not_parenthesized() {
    let wd = WorkDir::new("regression_506_word_boundaries_not_parenthesized");
    let path = "wb.txt";
    wd.create(path, "min minimum amin\n\
max maximum amax");

    let mut cmd = wd.command();
    cmd.arg("-w").arg("min|max").arg(path).arg("--only-matching");
    let lines: String = wd.stdout(&mut cmd);

    let expected = "min\nmax\n";

    assert_eq!(lines, expected);
}

// See: https://github.com/BurntSushi/ripgrep/issues/568
#[test]
fn regression_568_leading_hyphen_option_arguments() {
    let wd = WorkDir::new("regression_568_leading_hyphen_option_arguments");
    let path = "file";
    wd.create(path, "foo bar -baz\n");

    let mut cmd = wd.command();
    cmd.arg("-e-baz").arg("-e").arg("-baz").arg(path);
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "foo bar -baz\n");

    let mut cmd = wd.command();
    cmd.arg("-rni").arg("bar").arg(path);
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "foo ni -baz\n");

    let mut cmd = wd.command();
    cmd.arg("-r").arg("-n").arg("-i").arg("bar").arg(path);
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "foo -n -baz\n");
}

// See: https://github.com/BurntSushi/ripgrep/issues/693
#[test]
fn regression_693_context_option_in_contextless_mode() {
    let wd = WorkDir::new("regression_693_context_option_in_contextless_mode");

    wd.create("foo", "xyz\n");
    wd.create("bar", "xyz\n");

    let mut cmd = wd.command();
    cmd.arg("-C1").arg("-c").arg("--sort-files").arg("xyz");

    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
bar:1
foo:1
";
    assert_eq!(lines, expected);
}

#[test]
fn type_list() {
    let wd = WorkDir::new("type_list");

@@ -13,7 +13,7 @@ use std::time::Duration;
static TEST_DIR: &'static str = "ripgrep-tests";
static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;

/// WorkDir represents a directory in which tests are run.
/// `WorkDir` represents a directory in which tests are run.
///
/// Directories are created from a global atomic counter to avoid duplicates.
#[derive(Debug)]

@@ -41,11 +41,19 @@ impl WorkDir {
        }
    }

    /// Create a new file with the given name and contents in this directory.
    /// Create a new file with the given name and contents in this directory,
    /// or panic on error.
    pub fn create<P: AsRef<Path>>(&self, name: P, contents: &str) {
        self.create_bytes(name, contents.as_bytes());
    }

    /// Try to create a new file with the given name and contents in this
    /// directory.
    pub fn try_create<P: AsRef<Path>>(&self, name: P, contents: &str) -> io::Result<()> {
        let path = self.dir.join(name);
        self.try_create_bytes(path, contents.as_bytes())
    }

    /// Create a new file with the given name and size.
    pub fn create_size<P: AsRef<Path>>(&self, name: P, filesize: u64) {
        let path = self.dir.join(name);

@@ -53,12 +61,19 @@ impl WorkDir {
        nice_err(&path, file.set_len(filesize));
    }

    /// Create a new file with the given name and contents in this directory.
    /// Create a new file with the given name and contents in this directory,
    /// or panic on error.
    pub fn create_bytes<P: AsRef<Path>>(&self, name: P, contents: &[u8]) {
        let path = self.dir.join(name);
        let mut file = nice_err(&path, File::create(&path));
        nice_err(&path, file.write_all(contents));
        nice_err(&path, file.flush());
        nice_err(&path, self.try_create_bytes(&path, contents));
    }

    /// Try to create a new file with the given name and contents in this
    /// directory.
    fn try_create_bytes<P: AsRef<Path>>(&self, path: P, contents: &[u8]) -> io::Result<()> {
        let mut file = File::create(&path)?;
        file.write_all(contents)?;
        file.flush()
    }

    /// Remove a file with the given name from this directory.

@@ -78,6 +93,7 @@ impl WorkDir {
    /// this working directory.
    pub fn command(&self) -> process::Command {
        let mut cmd = process::Command::new(&self.bin());
        cmd.env_remove("RIPGREP_CONFIG_PATH");
        cmd.current_dir(&self.dir);
        cmd
    }

@@ -256,6 +272,23 @@ impl WorkDir {
                String::from_utf8_lossy(&o.stderr));
        }
    }

    /// Runs the given command and asserts that something was printed to
    /// stderr.
    pub fn assert_non_empty_stderr(&self, cmd: &mut process::Command) {
        let o = cmd.output().unwrap();
        if o.status.success() || o.stderr.is_empty() {
            panic!("\n\n===== {:?} =====\n\
                    command succeeded but expected failure!\
                    \n\ncwd: {}\
                    \n\nstatus: {}\
                    \n\nstdout: {}\n\nstderr: {}\
                    \n\n=====\n",
                   cmd, self.dir.display(), o.status,
                   String::from_utf8_lossy(&o.stdout),
                   String::from_utf8_lossy(&o.stderr));
        }
    }
}

fn nice_err<P: AsRef<Path>, T, E: error::Error>(

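
The WorkDir helpers above funnel every fallible filesystem call through nice_err, whose definition is cut off at the end of this hunk. A sketch of what such a helper typically does, assuming it simply unwraps the result and panics with the offending path (the real body is not shown here and may differ):

```
// Hypothetical sketch of a nice_err-style helper: unwrap a Result or panic
// with the path that caused the failure.
use std::path::Path;

fn nice_err<P: AsRef<Path>, T, E: std::error::Error>(path: P, res: Result<T, E>) -> T {
    match res {
        Ok(t) => t,
        Err(err) => panic!("{}: {}", path.as_ref().display(), err),
    }
}
```
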
@@ -1,6 +1,6 @@
[package]
name = "wincolor"
version = "0.1.3" #:version
version = "0.1.6" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
A simple Windows specific API for controlling text color in a Windows console.

@@ -16,6 +16,6 @@ license = "Unlicense/MIT"
name = "wincolor"
bench = false

[dependencies]
kernel32-sys = "0.2.2"
winapi = "0.2.8"
[dependencies.winapi]
version = "0.3"
features = ["consoleapi", "minwindef", "processenv", "winbase", "wincon"]

@@ -8,6 +8,8 @@ Note that on non-Windows platforms, this crate is empty but will compile.
# Example

```no_run
# #[cfg(windows)]
# {
use wincolor::{Console, Color, Intense};

let mut con = Console::stdout().unwrap();

@@ -15,13 +17,12 @@ con.fg(Intense::Yes, Color::Cyan).unwrap();
println!("This text will be intense cyan.");
con.reset().unwrap();
println!("This text will be normal.");
# }
```
*/

#![deny(missing_docs)]

#[cfg(windows)]
extern crate kernel32;
#[cfg(windows)]
extern crate winapi;

@@ -1,20 +1,22 @@
use std::io;
use std::mem;

use kernel32;
use winapi::{DWORD, HANDLE, WORD};
use winapi::winbase::{STD_ERROR_HANDLE, STD_OUTPUT_HANDLE};
use winapi::wincon::{
use winapi::shared::minwindef::{DWORD, WORD};
use winapi::um::consoleapi;
use winapi::um::processenv;
use winapi::um::winbase::{STD_ERROR_HANDLE, STD_OUTPUT_HANDLE};
use winapi::um::wincon::{
    self,
    FOREGROUND_BLUE as FG_BLUE,
    FOREGROUND_GREEN as FG_GREEN,
    FOREGROUND_RED as FG_RED,
    FOREGROUND_INTENSITY as FG_INTENSITY,
};

const FG_CYAN: DWORD = FG_BLUE | FG_GREEN;
const FG_MAGENTA: DWORD = FG_BLUE | FG_RED;
const FG_YELLOW: DWORD = FG_GREEN | FG_RED;
const FG_WHITE: DWORD = FG_BLUE | FG_GREEN | FG_RED;
const FG_CYAN: WORD = FG_BLUE | FG_GREEN;
const FG_MAGENTA: WORD = FG_BLUE | FG_RED;
const FG_YELLOW: WORD = FG_GREEN | FG_RED;
const FG_WHITE: WORD = FG_BLUE | FG_GREEN | FG_RED;

/// A Windows console.
///

@@ -30,33 +32,25 @@ const FG_WHITE: DWORD = FG_BLUE | FG_GREEN | FG_RED;
/// stdout before setting new text attributes.
#[derive(Debug)]
pub struct Console {
    handle: HANDLE,
    handle_id: DWORD,
    start_attr: TextAttributes,
    cur_attr: TextAttributes,
}

unsafe impl Send for Console {}

impl Drop for Console {
    fn drop(&mut self) {
        unsafe { kernel32::CloseHandle(self.handle); }
    }
}

impl Console {
    /// Get a console for a standard I/O stream.
    fn create_for_stream(handle_id: DWORD) -> io::Result<Console> {
        let mut info = unsafe { mem::zeroed() };
        let (handle, res) = unsafe {
            let handle = kernel32::GetStdHandle(handle_id);
            (handle, kernel32::GetConsoleScreenBufferInfo(handle, &mut info))
        let res = unsafe {
            let handle = processenv::GetStdHandle(handle_id);
            wincon::GetConsoleScreenBufferInfo(handle, &mut info)
        };
        if res == 0 {
            return Err(io::Error::last_os_error());
        }
        let attr = TextAttributes::from_word(info.wAttributes);
        Ok(Console {
            handle: handle,
            handle_id: handle_id,
            start_attr: attr,
            cur_attr: attr,
        })

@@ -80,7 +74,8 @@ impl Console {
    fn set(&mut self) -> io::Result<()> {
        let attr = self.cur_attr.to_word();
        let res = unsafe {
            kernel32::SetConsoleTextAttribute(self.handle, attr)
            let handle = processenv::GetStdHandle(self.handle_id);
            wincon::SetConsoleTextAttribute(handle, attr)
        };
        if res == 0 {
            return Err(io::Error::last_os_error());

@@ -121,6 +116,41 @@ impl Console {
        self.cur_attr = self.start_attr;
        self.set()
    }

    /// Toggle virtual terminal processing.
    ///
    /// This method attempts to toggle virtual terminal processing for this
    /// console. If there was a problem toggling it, then an error is
    /// returned. On success, the caller may assume that toggling it was
    /// successful.
    ///
    /// When virtual terminal processing is enabled, characters emitted to the
    /// console are parsed for VT100 and similar control character sequences
    /// that control color and other similar operations.
    pub fn set_virtual_terminal_processing(
        &mut self,
        yes: bool,
    ) -> io::Result<()> {
        let vt = wincon::ENABLE_VIRTUAL_TERMINAL_PROCESSING;

        let mut old_mode = 0;
        let handle = unsafe { processenv::GetStdHandle(self.handle_id) };
        if unsafe { consoleapi::GetConsoleMode(handle, &mut old_mode) } == 0 {
            return Err(io::Error::last_os_error());
        }
        let new_mode =
            if yes {
                old_mode | vt
            } else {
                old_mode & !vt
            };
        if old_mode == new_mode {
            return Ok(());
        }
        if unsafe { consoleapi::SetConsoleMode(handle, new_mode) } == 0 {
            return Err(io::Error::last_os_error());
        }
        Ok(())
    }
}

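
A usage note for the new set_virtual_terminal_processing method: a caller would typically obtain a console handle and flip the mode once at startup. A minimal, hypothetical sketch (Windows-only, assuming the wincolor API as shown in this diff):

```
// Hypothetical usage sketch: enable VT processing on stdout so that ANSI
// escape sequences are interpreted by the console.
#[cfg(windows)]
fn enable_vt_on_stdout() -> std::io::Result<()> {
    use wincolor::Console;

    let mut con = Console::stdout()?;
    con.set_virtual_terminal_processing(true)
}
```
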
/// A representation of text attributes for the Windows console.

@@ -139,16 +169,15 @@ impl TextAttributes {
        w |= self.fg_intense.to_fg();
        w |= self.bg_color.to_bg();
        w |= self.bg_intense.to_bg();
        w as WORD
        w
    }

    fn from_word(word: WORD) -> TextAttributes {
        let attr = word as DWORD;
        TextAttributes {
            fg_color: Color::from_fg(attr),
            fg_intense: Intense::from_fg(attr),
            bg_color: Color::from_bg(attr),
            bg_intense: Intense::from_bg(attr),
            fg_color: Color::from_fg(word),
            fg_intense: Intense::from_fg(word),
            bg_color: Color::from_bg(word),
            bg_intense: Intense::from_bg(word),
        }
    }
}

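
The to_word/from_word pair above packs foreground attributes into the low four bits of the console attribute word and background attributes into the next four (each to_bg is the corresponding to_fg shifted left by four). A standalone illustration of that layout follows; the constants mirror the Windows FOREGROUND_* bit values, but the snippet is an assumed example rather than code from this diff:

```
// Assumed illustration of the attribute-word layout: foreground bits in the
// low nibble, background bits in the next nibble.
const FG_BLUE: u16 = 0x1;
const FG_GREEN: u16 = 0x2;
const FG_RED: u16 = 0x4;
const FG_INTENSITY: u16 = 0x8;

fn main() {
    // Intense yellow text on a blue background.
    let fg = (FG_GREEN | FG_RED) | FG_INTENSITY; // low nibble
    let bg = FG_BLUE << 4; // high nibble
    let word = fg | bg;
    assert_eq!(word & 0b1111, 0b1110); // foreground bits
    assert_eq!((word >> 4) & 0b1111, 0b0001); // background bits
    println!("attribute word = {:#06x}", word);
}
```
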
@@ -162,22 +191,22 @@ pub enum Intense {
}

impl Intense {
    fn to_bg(&self) -> DWORD {
    fn to_bg(&self) -> WORD {
        self.to_fg() << 4
    }

    fn from_bg(word: DWORD) -> Intense {
    fn from_bg(word: WORD) -> Intense {
        Intense::from_fg(word >> 4)
    }

    fn to_fg(&self) -> DWORD {
    fn to_fg(&self) -> WORD {
        match *self {
            Intense::No => 0,
            Intense::Yes => FG_INTENSITY,
        }
    }

    fn from_fg(word: DWORD) -> Intense {
    fn from_fg(word: WORD) -> Intense {
        if word & FG_INTENSITY > 0 {
            Intense::Yes
        } else {

@@ -201,15 +230,15 @@ pub enum Color {
}

impl Color {
    fn to_bg(&self) -> DWORD {
    fn to_bg(&self) -> WORD {
        self.to_fg() << 4
    }

    fn from_bg(word: DWORD) -> Color {
    fn from_bg(word: WORD) -> Color {
        Color::from_fg(word >> 4)
    }

    fn to_fg(&self) -> DWORD {
    fn to_fg(&self) -> WORD {
        match *self {
            Color::Black => 0,
            Color::Blue => FG_BLUE,

@@ -222,7 +251,7 @@ impl Color {
        }
    }

    fn from_fg(word: DWORD) -> Color {
    fn from_fg(word: WORD) -> Color {
        match word & 0b111 {
            FG_BLUE => Color::Blue,
            FG_GREEN => Color::Green,