mirror of
https://github.com/BurntSushi/ripgrep.git
synced 2025-07-26 09:42:00 -07:00
Compare commits
1 Commits
ag/disable
...
ag/bstr-mi
Author | SHA1 | Date | |
---|---|---|---|
|
4b88e08f41 |
@@ -1,5 +1,4 @@
|
||||
language: rust
|
||||
dist: xenial
|
||||
env:
|
||||
global:
|
||||
- PROJECT_NAME: ripgrep
|
||||
|
62
CHANGELOG.md
62
CHANGELOG.md
@@ -1,65 +1,3 @@
|
||||
0.11.0 (TBD)
|
||||
============
|
||||
TODO.
|
||||
|
||||
**BREAKING CHANGES**:
|
||||
|
||||
* ripgrep has tweaked its exit status codes to be more like GNU grep's. Namely,
|
||||
if a non-fatal error occurs during a search, then ripgrep will now always
|
||||
emit a `2` exit status code, regardless of whether a match is found or not.
|
||||
Previously, ripgrep would only emit a `2` exit status code for a catastrophic
|
||||
error (e.g., regex syntax error). One exception to this is if ripgrep is run
|
||||
with `-q/--quiet`. In that case, if an error occurs and a match is found,
|
||||
then ripgrep will exit with a `0` exit status code.
|
||||
* The `avx-accel` feature of ripgrep has been removed since it is no longer
|
||||
necessary. All uses of AVX in ripgrep are now enabled automatically via
|
||||
runtime CPU feature detection. The `simd-accel` feature does remain
|
||||
available, however, it does increase compilation times substantially at the
|
||||
moment.
|
||||
|
||||
Feature enhancements:
|
||||
|
||||
* [FEATURE #1099](https://github.com/BurntSushi/ripgrep/pull/1099):
|
||||
Add support for Brotli and Zstd to the `-z/--search-zip` flag.
|
||||
* [FEATURE #1138](https://github.com/BurntSushi/ripgrep/pull/1138):
|
||||
Add `--no-ignore-dot` flag for ignoring `.ignore` files.
|
||||
* [FEATURE #1159](https://github.com/BurntSushi/ripgrep/pull/1159):
|
||||
ripgrep's exit status logic should now match GNU grep. See updated man page.
|
||||
* [FEATURE #1170](https://github.com/BurntSushi/ripgrep/pull/1170):
|
||||
Add `--ignore-file-case-insensitive` for case insensitive .ignore globs.
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* [BUG #373](https://github.com/BurntSushi/ripgrep/issues/373),
|
||||
[BUG #1098](https://github.com/BurntSushi/ripgrep/issues/1098):
|
||||
`**` is now accepted as valid syntax anywhere in a glob.
|
||||
* [BUG #916](https://github.com/BurntSushi/ripgrep/issues/916):
|
||||
ripgrep no longer hangs when searching `/proc` with a zombie process present.
|
||||
* [BUG #1091](https://github.com/BurntSushi/ripgrep/issues/1091):
|
||||
Add note about inverted flags to the man page.
|
||||
* [BUG #1095](https://github.com/BurntSushi/ripgrep/issues/1095):
|
||||
Fix corner cases involving the `--crlf` flag.
|
||||
* [BUG #1103](https://github.com/BurntSushi/ripgrep/issues/1103):
|
||||
Clarify what `--encoding auto` does.
|
||||
* [BUG #1106](https://github.com/BurntSushi/ripgrep/issues/1106):
|
||||
`--files-with-matches` and `--files-without-match` work with one file.
|
||||
* [BUG #1093](https://github.com/BurntSushi/ripgrep/pull/1093):
|
||||
Fix handling of literal slashes in gitignore patterns.
|
||||
* [BUG #1121](https://github.com/BurntSushi/ripgrep/issues/1121):
|
||||
Fix bug that was triggering Windows antimalware when using the --files flag.
|
||||
* [BUG #1125](https://github.com/BurntSushi/ripgrep/issues/1125),
|
||||
[BUG #1159](https://github.com/BurntSushi/ripgrep/issues/1159):
|
||||
ripgrep shouldn't panic for `rg -h | rg` and should emit correct exit status.
|
||||
* [BUG #1154](https://github.com/BurntSushi/ripgrep/issues/1154):
|
||||
Windows files with "hidden" attribute are now treated as hidden.
|
||||
* [BUG #1173](https://github.com/BurntSushi/ripgrep/issues/1173):
|
||||
Fix handling of `**` patterns in gitignore files.
|
||||
* [BUG #1174](https://github.com/BurntSushi/ripgrep/issues/1174):
|
||||
Fix handling of repeated `**` patterns in gitignore files.
|
||||
* [BUG #1176](https://github.com/BurntSushi/ripgrep/issues/1176):
|
||||
Fix bug where `-F`/`-x` weren't applied to patterns given via `-f`.
|
||||
|
||||
|
||||
0.10.0 (2018-09-07)
|
||||
===================
|
||||
This is a new minor version release of ripgrep that contains some major new
|
||||
|
506
Cargo.lock
generated
506
Cargo.lock
generated
@@ -1,11 +1,9 @@
|
||||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
[[package]]
|
||||
name = "aho-corasick"
|
||||
version = "0.7.3"
|
||||
version = "0.6.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -13,9 +11,9 @@ name = "atty"
|
||||
version = "0.2.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.47 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -25,10 +23,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "base64"
|
||||
version = "0.10.1"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"byteorder 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -38,42 +36,39 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "bstr"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
version = "0.0.1"
|
||||
dependencies = [
|
||||
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-automata 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bytecount"
|
||||
version = "0.5.1"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "1.3.1"
|
||||
version = "1.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.0.34"
|
||||
version = "1.0.28"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "0.1.7"
|
||||
version = "0.1.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "2.33.0"
|
||||
version = "2.32.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
@@ -87,37 +82,38 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.3.8"
|
||||
version = "0.3.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"smallvec 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"crossbeam-utils 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-utils"
|
||||
version = "0.6.5"
|
||||
version = "0.6.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "encoding_rs"
|
||||
version = "0.8.17"
|
||||
version = "0.8.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"packed_simd 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"simd 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "encoding_rs_io"
|
||||
version = "0.1.6"
|
||||
version = "0.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"encoding_rs 0.8.17 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"encoding_rs 0.8.14 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -126,8 +122,17 @@ version = "1.0.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "fuchsia-cprng"
|
||||
version = "0.1.1"
|
||||
name = "fuchsia-zircon"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fuchsia-zircon-sys"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
@@ -139,24 +144,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
name = "globset"
|
||||
version = "0.4.2"
|
||||
dependencies = [
|
||||
"aho-corasick 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"aho-corasick 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grep"
|
||||
version = "0.2.3"
|
||||
dependencies = [
|
||||
"atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"grep-cli 0.1.1",
|
||||
"grep-matcher 0.1.1",
|
||||
"grep-pcre2 0.1.2",
|
||||
"grep-printer 0.1.1",
|
||||
"grep-regex 0.1.2",
|
||||
"grep-searcher 0.1.3",
|
||||
"grep-regex 0.1.1",
|
||||
"grep-searcher 0.1.1",
|
||||
"regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
@@ -166,22 +174,21 @@ name = "grep-cli"
|
||||
version = "0.1.1"
|
||||
dependencies = [
|
||||
"atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"globset 0.4.2",
|
||||
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"same-file 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grep-matcher"
|
||||
version = "0.1.1"
|
||||
dependencies = [
|
||||
"memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr 0.0.1",
|
||||
"regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -196,59 +203,58 @@ dependencies = [
|
||||
name = "grep-printer"
|
||||
version = "0.1.1"
|
||||
dependencies = [
|
||||
"base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"base64 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"grep-matcher 0.1.1",
|
||||
"grep-regex 0.1.2",
|
||||
"grep-searcher 0.1.3",
|
||||
"serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"grep-regex 0.1.1",
|
||||
"grep-searcher 0.1.1",
|
||||
"serde 1.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grep-regex"
|
||||
version = "0.1.2"
|
||||
version = "0.1.1"
|
||||
dependencies = [
|
||||
"grep-matcher 0.1.1",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-syntax 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-syntax 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"utf8-ranges 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grep-searcher"
|
||||
version = "0.1.3"
|
||||
version = "0.1.1"
|
||||
dependencies = [
|
||||
"bstr 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bytecount 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"encoding_rs 0.8.17 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"encoding_rs_io 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr 0.0.1",
|
||||
"bytecount 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"encoding_rs 0.8.14 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"encoding_rs_io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"grep-matcher 0.1.1",
|
||||
"grep-regex 0.1.2",
|
||||
"grep-regex 0.1.1",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ignore"
|
||||
version = "0.4.6"
|
||||
dependencies = [
|
||||
"crossbeam-channel 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"crossbeam-channel 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"globset 0.4.2",
|
||||
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"same-file 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tempfile 3.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tempfile 3.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -258,50 +264,84 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "lazy_static"
|
||||
version = "1.3.0"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.51"
|
||||
version = "0.2.47"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.2.0"
|
||||
version = "2.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.47 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "memmap"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.47 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num_cpus"
|
||||
version = "1.10.0"
|
||||
version = "1.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.47 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "packed_simd"
|
||||
version = "0.3.3"
|
||||
name = "owning_ref"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot"
|
||||
version = "0.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"parking_lot_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot_core"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.47 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -309,7 +349,7 @@ name = "pcre2"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.47 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"pcre2-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@@ -320,8 +360,8 @@ name = "pcre2-sys"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cc 1.0.28 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.47 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"pkg-config 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
@@ -332,7 +372,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "0.4.27"
|
||||
version = "0.4.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@@ -340,28 +380,27 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "0.6.11"
|
||||
version = "0.6.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"proc-macro2 0.4.25 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand"
|
||||
version = "0.6.5"
|
||||
version = "0.6.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"autocfg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.47 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_jitter 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_os 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_pcg 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -370,20 +409,12 @@ version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"autocfg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_core"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_core"
|
||||
version = "0.4.0"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
@@ -391,7 +422,7 @@ name = "rand_hc"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -399,39 +430,29 @@ name = "rand_isaac"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_jitter"
|
||||
version = "0.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_os"
|
||||
version = "0.1.3"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.47 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_pcg"
|
||||
version = "0.1.2"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"autocfg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -439,7 +460,7 @@ name = "rand_xorshift"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -447,12 +468,12 @@ name = "rdrand"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "redox_syscall"
|
||||
version = "0.1.52"
|
||||
version = "0.1.50"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
@@ -460,32 +481,24 @@ name = "redox_termios"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"redox_syscall 0.1.52 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"redox_syscall 0.1.50 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex"
|
||||
version = "1.1.5"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"aho-corasick 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-syntax 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"aho-corasick 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-syntax 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"utf8-ranges 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex-automata"
|
||||
version = "0.1.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex-syntax"
|
||||
version = "0.6.6"
|
||||
version = "0.6.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"ucd-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@@ -496,27 +509,34 @@ name = "remove_dir_all"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ripgrep"
|
||||
version = "0.10.0"
|
||||
dependencies = [
|
||||
"bstr 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"grep 0.2.3",
|
||||
"ignore 0.4.6",
|
||||
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"num_cpus 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"num_cpus 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustc_version"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ryu"
|
||||
version = "0.2.7"
|
||||
@@ -527,65 +547,96 @@ name = "same-file"
|
||||
version = "1.0.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "scopeguard"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "semver"
|
||||
version = "0.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "semver-parser"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.90"
|
||||
version = "1.0.85"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "serde_derive"
|
||||
version = "1.0.90"
|
||||
version = "1.0.85"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"syn 0.15.30 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"proc-macro2 0.4.25 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"quote 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"syn 0.15.26 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.39"
|
||||
version = "1.0.36"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ryu 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "simd"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "smallvec"
|
||||
version = "0.6.9"
|
||||
version = "0.6.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "stable_deref_trait"
|
||||
version = "1.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.8.0"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "0.15.30"
|
||||
version = "0.15.26"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"proc-macro2 0.4.25 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"quote 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tempfile"
|
||||
version = "3.0.7"
|
||||
version = "3.0.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"redox_syscall 0.1.52 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.47 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"redox_syscall 0.1.50 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -601,14 +652,14 @@ name = "termion"
|
||||
version = "1.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"redox_syscall 0.1.52 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.47 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"redox_syscall 0.1.50 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "textwrap"
|
||||
version = "0.11.0"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@@ -619,7 +670,7 @@ name = "thread_local"
|
||||
version = "0.3.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -637,24 +688,37 @@ name = "unicode-xid"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "unreachable"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "utf8-ranges"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "void"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "walkdir"
|
||||
version = "2.2.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"same-file 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.3.7"
|
||||
version = "0.3.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@@ -668,10 +732,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-util"
|
||||
version = "0.1.2"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -684,80 +748,88 @@ name = "wincolor"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[metadata]
|
||||
"checksum aho-corasick 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e6f484ae0c99fec2e858eb6134949117399f222608d84cadb3f58c1f97c2364c"
|
||||
"checksum aho-corasick 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)" = "1e9a933f4e58658d7b12defcf96dc5c720f20832deebe3e0a19efd3b6aaeeb9e"
|
||||
"checksum atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9a7d5b8723950951411ee34d271d99dddcc2035a16ab25310ea2c8cfd4369652"
|
||||
"checksum autocfg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6d640bee2da49f60a4068a7fae53acde8982514ab7bae8b8cea9e88cbcfd799"
|
||||
"checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e"
|
||||
"checksum base64 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "621fc7ecb8008f86d7fb9b95356cd692ce9514b80a86d85b397f32a22da7b9e2"
|
||||
"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12"
|
||||
"checksum bstr 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6c8203ca06c502958719dae5f653a79e0cc6ba808ed02beffbf27d09610f2143"
|
||||
"checksum bytecount 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be0fdd54b507df8f22012890aadd099979befdba27713c767993f8380112ca7c"
|
||||
"checksum byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a019b10a2a7cdeb292db131fc8113e57ea2a908f6e7894b0c3c671893b65dbeb"
|
||||
"checksum cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)" = "30f813bf45048a18eda9190fd3c6b78644146056740c43172a5a3699118588fd"
|
||||
"checksum cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "11d43355396e872eefb45ce6342e4374ed7bc2b3a502d1b28e36d6e23c05d1f4"
|
||||
"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9"
|
||||
"checksum bytecount 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c219c0335c21a8bd79587ce5aee9f64aff1d0bd7a2cca7a58a815f9780cd3b0d"
|
||||
"checksum byteorder 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60f0b0d4c0a382d2734228fd12b5a6b5dac185c60e938026fd31b265b94f9bd2"
|
||||
"checksum cc 1.0.28 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4a8b715cb4597106ea87c7c84b2f1d452c7492033765df7f32651e66fcf749"
|
||||
"checksum cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "082bb9b28e00d3c9d39cc03e64ce4cea0f1bb9b3fde493f0cbc008472d22bdf4"
|
||||
"checksum clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b957d88f4b6a63b9d70d5f454ac8011819c6efa7727858f458ab71c756ce2d3e"
|
||||
"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
|
||||
"checksum crossbeam-channel 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "0f0ed1a4de2235cabda8558ff5840bffb97fcb64c97827f354a451307df5f72b"
|
||||
"checksum crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f8306fcef4a7b563b76b7dd949ca48f52bc1141aa067d2ea09565f3e2652aa5c"
|
||||
"checksum encoding_rs 0.8.17 (registry+https://github.com/rust-lang/crates.io-index)" = "4155785c79f2f6701f185eb2e6b4caf0555ec03477cb4c70db67b465311620ed"
|
||||
"checksum encoding_rs_io 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9619ee7a2bf4e777e020b95c1439abaf008f8ea8041b78a0552c4f1bcf4df32c"
|
||||
"checksum crossbeam-channel 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "137bc235f622ffaa0428e3854e24acb53291fc0b3ff6fb2cb75a8be6fb02f06b"
|
||||
"checksum crossbeam-utils 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "41ee4864f4797060e52044376f7d107429ce1fb43460021b126424b7180ee21a"
|
||||
"checksum encoding_rs 0.8.14 (registry+https://github.com/rust-lang/crates.io-index)" = "a69d152eaa438a291636c1971b0a370212165ca8a75759eb66818c5ce9b538f7"
|
||||
"checksum encoding_rs_io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "098f6a0ab73a9ba256b71344dc82c6d7e252736ad9db7f4e35345f3a1f8713f5"
|
||||
"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
|
||||
"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
|
||||
"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
|
||||
"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
|
||||
"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb"
|
||||
"checksum itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1306f3464951f30e30d12373d31c79fbd52d236e5e896fd92f96ec7babbbe60b"
|
||||
"checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14"
|
||||
"checksum libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)" = "bedcc7a809076656486ffe045abeeac163da1b558e963a31e29fbfbeba916917"
|
||||
"checksum lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a374c89b9db55895453a74c1e38861d9deec0b01b405a82516e9d5de4820dea1"
|
||||
"checksum libc 0.2.47 (registry+https://github.com/rust-lang/crates.io-index)" = "48450664a984b25d5b479554c29cc04e3150c97aa4c01da5604a2d4ed9151476"
|
||||
"checksum lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c"
|
||||
"checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6"
|
||||
"checksum memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2efc7bc57c883d4a4d6e3246905283d8dae951bb3bd32f49d6ef297f546e1c39"
|
||||
"checksum memchr 2.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e1dd4eaac298c32ce07eb6ed9242eda7d82955b9170b7d6db59b2e02cc63fcb8"
|
||||
"checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
|
||||
"checksum num_cpus 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1a23f0ed30a54abaa0c7e83b1d2d87ada7c3c23078d1d87815af3e3b6385fbba"
|
||||
"checksum packed_simd 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a85ea9fc0d4ac0deb6fe7911d38786b32fc11119afd9e9d38b84ff691ce64220"
|
||||
"checksum num_cpus 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5a69d464bdc213aaaff628444e99578ede64e9c854025aa43b9796530afa9238"
|
||||
"checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13"
|
||||
"checksum parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337"
|
||||
"checksum parking_lot_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "94c8c7923936b28d546dfd14d4472eaf34c99b14e1c973a32b3e6d4eb04298c9"
|
||||
"checksum pcre2 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3ae0a2682105ec5ca0ee5910bbc7e926386d348a05166348f74007942983c319"
|
||||
"checksum pcre2-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a9027f9474e4e13d3b965538aafcaebe48c803488ad76b3c97ef061a8324695f"
|
||||
"checksum pkg-config 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "676e8eb2b1b4c9043511a9b7bea0915320d7e502b0a079fb03f9635a5252b18c"
|
||||
"checksum proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)" = "4d317f9caece796be1980837fd5cb3dfec5613ebdb04ad0956deea83ce168915"
|
||||
"checksum quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "cdd8e04bd9c52e0342b406469d494fcb033be4bdbe5c606016defbb1681411e1"
|
||||
"checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
|
||||
"checksum proc-macro2 0.4.25 (registry+https://github.com/rust-lang/crates.io-index)" = "d3797b7142c9aa74954e351fc089bbee7958cebbff6bf2815e7ffff0b19f547d"
|
||||
"checksum quote 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "53fa22a1994bd0f9372d7a816207d8a2677ad0325b073f5c5332760f0fb62b5c"
|
||||
"checksum rand 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3906503e80ac6cbcacb2c2973fa8e473f24d7e2747c8c92bb230c2441cad96b5"
|
||||
"checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
|
||||
"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
|
||||
"checksum rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d0e7a549d590831370895ab7ba4ea0c1b6b011d106b5ff2da6eee112615e6dc0"
|
||||
"checksum rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0905b6b7079ec73b314d4c748701f6931eb79fd97c668caa3f1899b22b32c6db"
|
||||
"checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4"
|
||||
"checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08"
|
||||
"checksum rand_jitter 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b9ea758282efe12823e0d952ddb269d2e1897227e464919a554f2a03ef1b832"
|
||||
"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
|
||||
"checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
|
||||
"checksum rand_os 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f46fbd5550acf75b0c2730f5dd1873751daf9beb8f11b44027778fae50d7feca"
|
||||
"checksum rand_pcg 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "086bd09a33c7044e56bb44d5bdde5a60e7f119a9e95b0775f545de759a32fe05"
|
||||
"checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
|
||||
"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
|
||||
"checksum redox_syscall 0.1.52 (registry+https://github.com/rust-lang/crates.io-index)" = "d32b3053e5ced86e4bc0411fec997389532bf56b000e66cb4884eeeb41413d69"
|
||||
"checksum redox_syscall 0.1.50 (registry+https://github.com/rust-lang/crates.io-index)" = "52ee9a534dc1301776eff45b4fa92d2c39b1d8c3d3357e6eb593e0d795506fc2"
|
||||
"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76"
|
||||
"checksum regex 1.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "559008764a17de49a3146b234641644ed37d118d1ef641a0bb573d146edc6ce0"
|
||||
"checksum regex-automata 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "a25a7daa2eea48550e9946133d6cc9621020d29cc7069089617234bf8b6a8693"
|
||||
"checksum regex-syntax 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "dcfd8681eebe297b81d98498869d4aae052137651ad7b96822f09ceb690d0a96"
|
||||
"checksum regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "37e7cbbd370869ce2e8dff25c7018702d10b21a20ef7135316f8daecd6c25b7f"
|
||||
"checksum regex-syntax 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4e47a2ed29da7a9e1960e1639e7a982e6edc6d49be308a3b02daf511504a16d1"
|
||||
"checksum remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3488ba1b9a2084d38645c4c08276a1752dcbf2c7130d74f1569681ad5d2799c5"
|
||||
"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
|
||||
"checksum ryu 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "eb9e9b8cde282a9fe6a42dd4681319bfb63f121b8a8ee9439c6f4107e58a46f7"
|
||||
"checksum same-file 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8f20c4be53a8a1ff4c1f1b2bd14570d2f634628709752f0702ecdd2b3f9a5267"
|
||||
"checksum serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "aa5f7c20820475babd2c077c3ab5f8c77a31c15e16ea38687b4c02d3e48680f4"
|
||||
"checksum serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "58fc82bec244f168b23d1963b45c8bf5726e9a15a9d146a067f9081aeed2de79"
|
||||
"checksum serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)" = "5a23aa71d4a4d43fdbfaac00eff68ba8a06a51759a89ac3304323e800c4dd40d"
|
||||
"checksum smallvec 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c4488ae950c49d403731982257768f48fada354a5203fe81f9bb6f43ca9002be"
|
||||
"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
|
||||
"checksum syn 0.15.30 (registry+https://github.com/rust-lang/crates.io-index)" = "66c8865bf5a7cbb662d8b011950060b3c8743dca141b054bf7195b20d314d8e2"
|
||||
"checksum tempfile 3.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "b86c784c88d98c801132806dadd3819ed29d8600836c4088e855cdf3e178ed8a"
|
||||
"checksum scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27"
|
||||
"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
|
||||
"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
|
||||
"checksum serde 1.0.85 (registry+https://github.com/rust-lang/crates.io-index)" = "534b8b91a95e0f71bca3ed5824752d558da048d4248c91af873b63bd60519752"
|
||||
"checksum serde_derive 1.0.85 (registry+https://github.com/rust-lang/crates.io-index)" = "a915306b0f1ac5607797697148c223bedeaa36bcc2e28a01441cd638cc6567b4"
|
||||
"checksum serde_json 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)" = "574378d957d6dcdf1bbb5d562a15cbd5e644159432f84634b94e485267abbcc7"
|
||||
"checksum simd 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0048b17eb9577ac545c61d85c3559b41dfb4cbea41c9bd9ca6a4f73ff05fda84"
|
||||
"checksum smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "b73ea3738b47563803ef814925e69be00799a8c07420be8b996f8e98fb2336db"
|
||||
"checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8"
|
||||
"checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550"
|
||||
"checksum syn 0.15.26 (registry+https://github.com/rust-lang/crates.io-index)" = "f92e629aa1d9c827b2bb8297046c1ccffc57c99b947a680d3ccff1f136a3bee9"
|
||||
"checksum tempfile 3.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "7e91405c14320e5c79b3d148e1c86f40749a36e490642202a31689cb1a3452b2"
|
||||
"checksum termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4096add70612622289f2fdcdbd5086dc81c1e2675e6ae58d6c4f62a16c6d7f2f"
|
||||
"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096"
|
||||
"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
|
||||
"checksum textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "307686869c93e71f94da64286f9a9524c0f308a9e1c87a583de8e9c9039ad3f6"
|
||||
"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"
|
||||
"checksum ucd-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535c204ee4d8434478593480b8f86ab45ec9aae0e83c568ca81abf0fd0e88f86"
|
||||
"checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526"
|
||||
"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
|
||||
"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56"
|
||||
"checksum utf8-ranges 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "796f7e48bef87609f7ade7e06495a87d5cd06c7866e6a5cbfceffc558a243737"
|
||||
"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
|
||||
"checksum walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "9d9d7ed3431229a144296213105a390676cc49c9b6a72bd19f3176c98e129fa1"
|
||||
"checksum winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f10e386af2b13e47c89e7236a7a14a086791a2b88ebad6df9bf42040195cf770"
|
||||
"checksum winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "92c1eb33641e276cfa214a0522acad57be5c56b10cb348b3c5117db75f3ac4b0"
|
||||
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
"checksum winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9"
|
||||
"checksum winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "afc5508759c5bf4285e61feb862b6083c8480aec864fa17a81fdec6f69b461ab"
|
||||
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
"checksum wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "561ed901ae465d6185fa7864d63fbd5720d0ef718366c9a4dc83cf6170d7e9ba"
|
||||
|
@@ -46,7 +46,6 @@ members = [
|
||||
]
|
||||
|
||||
[dependencies]
|
||||
bstr = "0.1.2"
|
||||
grep = { version = "0.2.3", path = "grep" }
|
||||
ignore = { version = "0.4.4", path = "ignore" }
|
||||
lazy_static = "1.1.0"
|
||||
|
FAQ.md

@@ -118,7 +118,7 @@ from run to run of ripgrep.

The only way to make the order of results consistent is to ask ripgrep to
sort the output. Currently, this will disable all parallelism. (On smaller
repositories, you might not notice much of a performance difference!) You
can achieve this with the `--sort path` flag.
can achieve this with the `--sort-files` flag.

There is more discussion on this topic here:
https://github.com/BurntSushi/ripgrep/issues/152
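For example, on a recent release (where the flag is spelled `--sort path`; older releases use `--sort-files` instead), a sorted search of a hypothetical `src/` tree looks like this:

```
$ rg --sort path 'fn main' src/
```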
@@ -136,10 +136,10 @@ How do I search compressed files?
</h3>

ripgrep's `-z/--search-zip` flag will cause it to search compressed files
automatically. Currently, this supports gzip, bzip2, xz, lzma, lz4, Brotli and
Zstd. Each of these requires the corresponding `gzip`, `bzip2`, `xz`,
`lz4`, `brotli` and `zstd` binaries to be installed on your system. (That is,
ripgrep does decompression by shelling out to another process.)
automatically. Currently, this supports gzip, bzip2, lzma, lz4 and xz only and
requires the corresponding `gzip`, `bzip2` and `xz` binaries to be installed on
your system. (That is, ripgrep does decompression by shelling out to another
process.)

ripgrep currently does not search archive formats, so `*.tar.gz` files, for
example, are skipped.
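As a quick illustration (the log file name is made up), a gzipped file only needs the flag; the external `gzip` binary does the decompression:

```
$ rg -z 'connection reset' logs/app.log.gz
```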
@@ -149,8 +149,9 @@ example, are skipped.
How do I search over multiple lines?
</h3>

The `-U/--multiline` flag enables ripgrep to report results that span over
multiple lines.
This isn't currently possible. ripgrep is fundamentally a line-oriented search
tool. With that said,
[multiline search is a planned opt-in feature](https://github.com/BurntSushi/ripgrep/issues/176).
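On builds that do ship `-U/--multiline`, a minimal sketch (file name hypothetical) matches a line ending in `foo` followed by a line starting with `bar`:

```
$ rg -U 'foo\nbar' notes.txt
```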
<h3 name="fancy">

GUIDE.md

@@ -235,11 +235,6 @@ Like `.gitignore`, a `.ignore` file can be placed in any directory. Its rules
will be processed with respect to the directory it resides in, just like
`.gitignore`.

To process `.gitignore` and `.ignore` files case insensitively, use the flag
`--ignore-file-case-insensitive`. This is especially useful on case insensitive
file systems like those on Windows and macOS. Note though that this can come
with a significant performance penalty, and is therefore disabled by default.
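For example (directory name hypothetical):

```
$ rg --ignore-file-case-insensitive 'TODO' project/
```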

For a more in depth description of how glob patterns in a `.gitignore` file
are interpreted, please see `man gitignore`.
@@ -525,9 +520,9 @@ config file. Once the environment variable is set, open the file and just type
in the flags you want set automatically. There are only two rules for
describing the format of the config file:

1. Every line is a shell argument, after trimming whitespace.
2. Lines starting with `#` (optionally preceded by any amount of whitespace)
   are ignored.
1. Every line is a shell argument, after trimming ASCII whitespace.
2. Lines starting with `#` (optionally preceded by any amount of
   ASCII whitespace) are ignored.

In particular, there is no escaping. Each line is given to ripgrep as a single
command line argument verbatim.
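A small sketch of what such a file could contain (the flags here are just examples):

```
$ cat $HOME/.ripgreprc
# Trim very long lines and use smart case matching.
--max-columns=150
--smart-case
```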
@@ -603,14 +598,13 @@ topic, but we can try to summarize its relevancy to ripgrep:

* Files are generally just a bundle of bytes. There is no reliable way to know
  their encoding.
* Either the encoding of the pattern must match the encoding of the files being
  searched, or a form of transcoding must be performed that converts either the
  searched, or a form of transcoding must be performed converts either the
  pattern or the file to the same encoding as the other.
* ripgrep tends to work best on plain text files, and among plain text files,
  the most popular encodings likely consist of ASCII, latin1 or UTF-8. As
  a special exception, UTF-16 is prevalent in Windows environments

In light of the above, here is how ripgrep behaves when `--encoding auto` is
given, which is the default:
In light of the above, here is how ripgrep behaves:

* All input is assumed to be ASCII compatible (which means every byte that
  corresponds to an ASCII codepoint actually is an ASCII codepoint). This
@@ -626,15 +620,12 @@ given, which is the default:
  they correspond to a UTF-16 BOM, then ripgrep will transcode the contents of
  the file from UTF-16 to UTF-8, and then execute the search on the transcoded
  version of the file. (This incurs a performance penalty since transcoding
  is slower than regex searching.) If the file contains invalid UTF-16, then
  the Unicode replacement codepoint is substituted in place of invalid code
  units.
  is slower than regex searching.)
* To handle other cases, ripgrep provides a `-E/--encoding` flag, which permits
  you to specify an encoding from the
  [Encoding Standard](https://encoding.spec.whatwg.org/#concept-encoding-get).
  ripgrep will assume *all* files searched are the encoding specified (unless
  the file has a BOM) and will perform a transcoding step just like in the
  UTF-16 case described above.
  ripgrep will assume *all* files searched are the encoding specified and
  will perform a transcoding step just like in the UTF-16 case described above.
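For example (file name hypothetical), forcing every file searched to be treated as Latin-1:

```
$ rg -E latin1 'résumé' old-notes.txt
```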

By default, ripgrep will not require its input be valid UTF-8. That is, ripgrep
can and will search arbitrary bytes. The key here is that if you're searching
@@ -644,26 +635,9 @@ pattern won't find anything. With all that said, this mode of operation is
important, because it lets you find ASCII or UTF-8 *within* files that are
otherwise arbitrary bytes.

As a special case, the `-E/--encoding` flag supports the value `none`, which
will completely disable all encoding related logic, including BOM sniffing.
When `-E/--encoding` is set to `none`, ripgrep will search the raw bytes of
the underlying file with no transcoding step. For example, here's how you might
search the raw UTF-16 encoding of the string `Шерлок`:

```
$ rg '(?-u)\(\x045\x04@\x04;\x04>\x04:\x04' -E none -a some-utf16-file
```

Of course, that's just an example meant to show how one can drop down into
raw bytes. Namely, the simpler command works as you might expect automatically:

```
$ rg 'Шерлок' some-utf16-file
```

Finally, it is possible to disable ripgrep's Unicode support from within the
regular expression. For example, let's say you wanted `.` to match any byte
rather than any Unicode codepoint. (You might want this while searching a
pattern regular expression. For example, let's say you wanted `.` to match any
byte rather than any Unicode codepoint. (You might want this while searching a
binary file, since `.` by default will not match invalid UTF-8.) You could do
this by disabling Unicode via a regular expression flag:
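A minimal sketch of such a command (file name hypothetical), where `(?-u)` makes `.` match any byte on a line:

```
$ rg -a '(?-u).' some-binary-file
```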
@@ -701,10 +675,10 @@ used options that will likely impact how you use ripgrep on a regular basis.

* `--files`: Print the files that ripgrep *would* search, but don't actually
  search them.
* `-a/--text`: Search binary files as if they were plain text.
* `-z/--search-zip`: Search compressed files (gzip, bzip2, lzma, xz, lz4,
  brotli, zstd). This is disabled by default.
* `-z/--search-zip`: Search compressed files (gzip, bzip2, lzma, xz). This is
  disabled by default.
* `-C/--context`: Show the lines surrounding a match.
* `--sort path`: Force ripgrep to sort its output by file name. (This disables
* `--sort-files`: Force ripgrep to sort its output by file name. (This disables
  parallelism, so it might be slower.)
* `-L/--follow`: Follow symbolic links while recursively searching.
* `-M/--max-columns`: Limit the length of lines printed by ripgrep.
README.md

@@ -1,12 +1,11 @@
ripgrep (rg)
------------
ripgrep is a line-oriented search tool that recursively searches your current
directory for a regex pattern. By default, ripgrep will respect your .gitignore
and automatically skip hidden files/directories and binary files. ripgrep
directory for a regex pattern while respecting your gitignore rules. ripgrep
has first class support on Windows, macOS and Linux, with binary downloads
available for [every release](https://github.com/BurntSushi/ripgrep/releases).
ripgrep is similar to other popular search tools like The Silver Searcher, ack
and grep.
ripgrep is similar to other popular search tools like The Silver Searcher,
ack and grep.

[](https://travis-ci.org/BurntSushi/ripgrep)
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
@@ -249,22 +248,21 @@ If you're a **Gentoo** user, you can install ripgrep from the
$ emerge sys-apps/ripgrep
```

If you're a **Fedora** user, you can install ripgrep from official
If you're a **Fedora 27+** user, you can install ripgrep from official
repositories.

```
$ sudo dnf install ripgrep
```

If you're an **openSUSE Leap 15.0** user, you can install ripgrep from the
[utilities repo](https://build.opensuse.org/package/show/utilities/ripgrep):
If you're a **Fedora 24+** user, you can install ripgrep from
[copr](https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/):

```
$ sudo zypper ar https://download.opensuse.org/repositories/utilities/openSUSE_Leap_15.0/utilities.repo
$ sudo zypper install ripgrep
$ sudo dnf copr enable carlwgeorge/ripgrep
$ sudo dnf install ripgrep
```

If you're an **openSUSE Tumbleweed** user, you can install ripgrep from the
[official repo](http://software.opensuse.org/package/ripgrep):
@@ -339,7 +337,7 @@ If you're a **NetBSD** user, then you can install ripgrep from

If you're a **Rust programmer**, ripgrep can be installed with `cargo`.

* Note that the minimum supported version of Rust for ripgrep is **1.32.0**,
* Note that the minimum supported version of Rust for ripgrep is **1.28.0**,
  although ripgrep may work with older versions.
* Note that the binary may be bigger than expected because it contains debug
  symbols. This is intentional. To remove debug symbols and therefore reduce
@@ -360,7 +358,7 @@ ripgrep isn't currently in any other package repositories.

ripgrep is written in Rust, so you'll need to grab a
[Rust installation](https://www.rust-lang.org/) in order to compile it.
ripgrep compiles with Rust 1.32.0 (stable) or newer. In general, ripgrep tracks
ripgrep compiles with Rust 1.28.0 (stable) or newer. In general, ripgrep tracks
the latest stable release of the Rust compiler.

To build ripgrep:
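The usual Cargo workflow applies; as a sketch (output and version numbers will differ):

```
$ git clone https://github.com/BurntSushi/ripgrep
$ cd ripgrep
$ cargo build --release
$ ./target/release/rg --version
```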
@@ -377,14 +375,18 @@ If you have a Rust nightly compiler and a recent Intel CPU, then you can enable
additional optional SIMD acceleration like so:

```
RUSTFLAGS="-C target-cpu=native" cargo build --release --features 'simd-accel'
RUSTFLAGS="-C target-cpu=native" cargo build --release --features 'simd-accel avx-accel'
```

The `simd-accel` feature enables SIMD support in certain ripgrep dependencies
(responsible for transcoding). They are not necessary to get SIMD optimizations
for search; those are enabled automatically. Hopefully, some day, the
`simd-accel` feature will similarly become unnecessary. **WARNING:** Currently,
enabling this option can increase compilation times dramatically.
If your machine doesn't support AVX instructions, then simply remove
`avx-accel` from the features list. Similarly for SIMD (which corresponds
roughly to SSE instructions).

The `simd-accel` and `avx-accel` features enable SIMD support in certain
ripgrep dependencies (responsible for counting lines and transcoding). They
are not necessary to get SIMD optimizations for search; those are enabled
automatically. Hopefully, some day, the `simd-accel` and `avx-accel` features
will similarly become unnecessary.

Finally, optional PCRE2 support can be built with ripgrep by enabling the
`pcre2` feature:
@@ -393,8 +395,8 @@ Finally, optional PCRE2 support can be built with ripgrep by enabling the
$ cargo build --release --features 'pcre2'
```

(Tip: use `--features 'pcre2 simd-accel'` to also include compile time SIMD
optimizations, which will only work with a nightly compiler.)
(Tip: use `--features 'pcre2 simd-accel avx-accel'` to also include compile
time SIMD optimizations, which will only work with a nightly compiler.)

Enabling the PCRE2 feature works with a stable Rust compiler and will
attempt to automatically find and link with your system's PCRE2 library via
@@ -77,5 +77,5 @@ deploy:

branches:
  only:
  - /^\d+\.\d+\.\d+$/
  - /\d+\.\d+\.\d+/
  - master
complete/_rg

@@ -112,12 +112,8 @@ _rg() {
    $no"--no-hidden[don't search hidden files and directories]"

    + '(ignore)' # Ignore-file options
    "(--no-ignore-global --no-ignore-parent --no-ignore-vcs --no-ignore-dot)--no-ignore[don't respect ignore files]"
    $no'(--ignore-global --ignore-parent --ignore-vcs --ignore-dot)--ignore[respect ignore files]'

    + '(ignore-file-case-insensitive)' # Ignore-file case sensitivity options
    '--ignore-file-case-insensitive[process ignore files case insensitively]'
    $no'--no-ignore-file-case-insensitive[process ignore files case sensitively]'
    "(--no-ignore-global --no-ignore-parent --no-ignore-vcs)--no-ignore[don't respect ignore files]"
    $no'(--ignore-global --ignore-parent --ignore-vcs)--ignore[respect ignore files]'

    + '(ignore-global)' # Global ignore-file options
    "--no-ignore-global[don't respect global ignore files]"
@@ -131,10 +127,6 @@ _rg() {
    "--no-ignore-vcs[don't respect version control ignore files]"
    $no'--ignore-vcs[respect version control ignore files]'

    + '(ignore-dot)' # .ignore-file options
    "--no-ignore-dot[don't respect .ignore files]"
    $no'--ignore-dot[respect .ignore files]'

    + '(json)' # JSON options
    '--json[output results in JSON Lines format]'
    $no"--no-json[don't output results in JSON Lines format]"
@@ -378,7 +370,7 @@ _rg_encodings() {
    shift{-,_}jis csshiftjis {,x-}sjis ms_kanji ms932
    utf{,-}8 utf-16{,be,le} unicode-1-1-utf-8
    windows-{31j,874,949,125{0..8}} dos-874 tis-620 ansi_x3.4-1968
    x-user-defined auto none
    x-user-defined auto
  )

  _wanted encodings expl encoding compadd -a "$@" - _encodings
@@ -34,12 +34,12 @@ files/directories and binary files.
ripgrep's default regex engine uses finite automata and guarantees linear
time searching. Because of this, features like backreferences and arbitrary
look-around are not supported. However, if ripgrep is built with PCRE2, then
the *--pcre2* flag can be used to enable backreferences and look-around.
the --pcre2 flag can be used to enable backreferences and look-around.

ripgrep supports configuration files. Set *RIPGREP_CONFIG_PATH* to a
ripgrep supports configuration files. Set RIPGREP_CONFIG_PATH to a
configuration file. The file can specify one shell argument per line. Lines
starting with *#* are ignored. For more details, see the man page or the
*README*.
starting with '#' are ignored. For more details, see the man page or the
README.


REGEX SYNTAX
@@ -52,10 +52,10 @@ https://docs.rs/regex/*/regex/bytes/index.html#syntax

To a first approximation, ripgrep uses Perl-like regexes without look-around or
backreferences. This makes them very similar to the "extended" (ERE) regular
expressions supported by *egrep*, but with a few additional features like
expressions supported by `egrep`, but with a few additional features like
Unicode character classes.

If you're using ripgrep with the *--pcre2* flag, then please consult
If you're using ripgrep with the --pcre2 flag, then please consult
https://www.pcre.org or the PCRE2 man pages for documentation on the supported
syntax.
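For instance, Unicode character classes such as `\p{Greek}` work in the default engine (file name hypothetical):

```
$ rg '\p{Greek}+' notes.txt
```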
@@ -73,32 +73,13 @@ _PATH_::

OPTIONS
-------
Note that for many options, there exist flags to disable them. In some cases,
those flags are not listed in a first class way below. For example, the
*--column* flag (listed below) enables column numbers in ripgrep's output, but
the *--no-column* flag (not listed below) disables them. The reverse can also
exist. For example, the *--no-ignore* flag (listed below) disables ripgrep's
*gitignore* logic, but the *--ignore* flag (not listed below) enables it. These
flags are useful for overriding a ripgrep configuration file on the command
line. Each flag's documentation notes whether an inverted flag exists. In all
cases, the flag specified last takes precedence.

{OPTIONS}


EXIT STATUS
-----------
If ripgrep finds a match, then the exit status of the program is 0. If no match
could be found, then the exit status is 1. If an error occurred, then the exit
status is always 2 unless ripgrep was run with the *--quiet* flag and a match
was found. In summary:

* `0` exit status occurs only when at least one match was found, and if
  no error occurred, unless *--quiet* was given.
* `1` exit status occurs only when no match was found and no error occurred.
* `2` exit status occurs when an error occurred. This is true for both
  catastrophic errors (e.g., a regex syntax error) and for soft errors (e.g.,
  unable to read a file).
could be found, then the exit status is non-zero.
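A shell illustration of the newer rules (paths are hypothetical; the second command errors because the file cannot be read):

```
$ rg 'no such pattern' src/ ; echo $?
1
$ rg 'fn main' ./does-not-exist ; echo $?
2
```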


CONFIGURATION FILES
@@ -107,12 +88,12 @@ ripgrep supports reading configuration files that change ripgrep's default
behavior. The format of the configuration file is an "rc" style and is very
simple. It is defined by two rules:

1. Every line is a shell argument, after trimming whitespace.
2. Lines starting with *#* (optionally preceded by any amount of
   whitespace) are ignored.
1. Every line is a shell argument, after trimming ASCII whitespace.
2. Lines starting with _#_ (optionally preceded by any amount of
   ASCII whitespace) are ignored.

ripgrep will look for a single configuration file if and only if the
*RIPGREP_CONFIG_PATH* environment variable is set and is non-empty.
_RIPGREP_CONFIG_PATH_ environment variable is set and is non-empty.
ripgrep will parse shell arguments from this file on startup and will
behave as if the arguments in this file were prepended to any explicit
arguments given to ripgrep on the command line.
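For example, if a hypothetical `$HOME/.ripgreprc` contains `--smart-case`, then after setting the variable every invocation behaves as if that flag had been typed first:

```
$ export RIPGREP_CONFIG_PATH=$HOME/.ripgreprc
$ rg needle        # behaves like: rg --smart-case needle
```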
@@ -174,20 +155,20 @@ SHELL COMPLETION
Shell completion files are included in the release tarball for Bash, Fish, Zsh
and PowerShell.

For *bash*, move *rg.bash* to *$XDG_CONFIG_HOME/bash_completion*
or */etc/bash_completion.d/*.
For *bash*, move `rg.bash` to `$XDG_CONFIG_HOME/bash_completion`
or `/etc/bash_completion.d/`.

For *fish*, move *rg.fish* to *$HOME/.config/fish/completions*.
For *fish*, move `rg.fish` to `$HOME/.config/fish/completions`.

For *zsh*, move *_rg* to one of your *$fpath* directories.
For *zsh*, move `_rg` to one of your `$fpath` directories.


CAVEATS
-------
ripgrep may abort unexpectedly when using default settings if it searches a
file that is simultaneously truncated. This behavior can be avoided by passing
the *--no-mmap* flag which will forcefully disable the use of memory maps in
all cases.
the --no-mmap flag which will forcefully disable the use of memory maps in all
cases.


VERSION
@@ -199,11 +180,7 @@ HOMEPAGE
--------
https://github.com/BurntSushi/ripgrep

Please report bugs and feature requests in the issue tracker. Please do your
best to provide a reproducible test case for bugs. This should include the
corpus being searched, the *rg* command, the actual output and the expected
output. Please also include the output of running the same *rg* command but
with the *--debug* flag.
Please report bugs and feature requests in the issue tracker.


AUTHORS
@@ -19,11 +19,11 @@ name = "globset"
|
||||
bench = false
|
||||
|
||||
[dependencies]
|
||||
aho-corasick = "0.7.3"
|
||||
bstr = { version = "0.1.2", default-features = false, features = ["std"] }
|
||||
aho-corasick = "0.6.8"
|
||||
fnv = "1.0.6"
|
||||
log = "0.4.5"
|
||||
regex = "1.1.5"
|
||||
memchr = "2.1.0"
|
||||
regex = "1.1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
glob = "0.2.11"
|
||||
|
@@ -120,7 +120,7 @@ impl GlobMatcher {
|
||||
|
||||
/// Tests whether the given path matches this pattern or not.
|
||||
pub fn is_match_candidate(&self, path: &Candidate) -> bool {
|
||||
self.re.is_match(path.path.as_bytes())
|
||||
self.re.is_match(&path.path)
|
||||
}
|
||||
}
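// A hedged usage sketch of the public globset API that both variants of
// `is_match_candidate` above ultimately serve; it is not part of this diff.
// `Glob`, `GlobSetBuilder` and `GlobSet::matches` are the crate's documented
// entry points, and `*` crosses `/` by default (literal_separator is off).
//
//     use globset::{Glob, GlobSetBuilder};
//
//     fn globset_example() -> Result<(), globset::Error> {
//         let mut builder = GlobSetBuilder::new();
//         builder.add(Glob::new("*.rs")?);          // index 0
//         builder.add(Glob::new("src/**/*.txt")?);  // index 1
//         let set = builder.build()?;
//         // Matches are reported as indices into the set, in insertion order.
//         assert_eq!(set.matches("src/lib.rs"), vec![0]);
//         assert_eq!(set.matches("src/notes/todo.txt"), vec![1]);
//         Ok(())
//     }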
|
||||
|
||||
@@ -145,7 +145,7 @@ impl GlobStrategic {
|
||||
|
||||
/// Tests whether the given path matches this pattern or not.
|
||||
fn is_match_candidate(&self, candidate: &Candidate) -> bool {
|
||||
let byte_path = candidate.path.as_bytes();
|
||||
let byte_path = &*candidate.path;
|
||||
|
||||
match self.strategy {
|
||||
MatchStrategy::Literal(ref lit) => lit.as_bytes() == byte_path,
|
||||
@@ -837,66 +837,40 @@ impl<'a> Parser<'a> {
|
||||
|
||||
fn parse_star(&mut self) -> Result<(), Error> {
|
||||
let prev = self.prev;
|
||||
if self.peek() != Some('*') {
|
||||
if self.chars.peek() != Some(&'*') {
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
return Ok(());
|
||||
}
|
||||
assert!(self.bump() == Some('*'));
|
||||
if !self.have_tokens()? {
|
||||
if !self.peek().map_or(true, is_separator) {
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
} else {
|
||||
self.push_token(Token::RecursivePrefix)?;
|
||||
assert!(self.bump().map_or(true, is_separator));
|
||||
self.push_token(Token::RecursivePrefix)?;
|
||||
let next = self.bump();
|
||||
if !next.map(is_separator).unwrap_or(true) {
|
||||
return Err(self.error(ErrorKind::InvalidRecursive));
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
self.pop_token()?;
|
||||
if !prev.map(is_separator).unwrap_or(false) {
|
||||
if self.stack.len() <= 1
|
||||
|| (prev != Some(',') && prev != Some('{'))
|
||||
{
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
return Ok(());
|
||||
|| (prev != Some(',') && prev != Some('{')) {
|
||||
return Err(self.error(ErrorKind::InvalidRecursive));
|
||||
}
|
||||
}
|
||||
let is_suffix =
|
||||
match self.peek() {
|
||||
None => {
|
||||
assert!(self.bump().is_none());
|
||||
true
|
||||
}
|
||||
Some(',') | Some('}') if self.stack.len() >= 2 => {
|
||||
true
|
||||
}
|
||||
Some(c) if is_separator(c) => {
|
||||
assert!(self.bump().map(is_separator).unwrap_or(false));
|
||||
false
|
||||
}
|
||||
_ => {
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
match self.pop_token()? {
|
||||
Token::RecursivePrefix => {
|
||||
self.push_token(Token::RecursivePrefix)?;
|
||||
match self.chars.peek() {
|
||||
None => {
|
||||
assert!(self.bump().is_none());
|
||||
self.push_token(Token::RecursiveSuffix)
|
||||
}
|
||||
Token::RecursiveSuffix => {
|
||||
self.push_token(Token::RecursiveSuffix)?;
|
||||
Some(&',') | Some(&'}') if self.stack.len() >= 2 => {
|
||||
self.push_token(Token::RecursiveSuffix)
|
||||
}
|
||||
_ => {
|
||||
if is_suffix {
|
||||
self.push_token(Token::RecursiveSuffix)?;
|
||||
} else {
|
||||
self.push_token(Token::RecursiveZeroOrMore)?;
|
||||
}
|
||||
Some(&c) if is_separator(c) => {
|
||||
assert!(self.bump().map(is_separator).unwrap_or(false));
|
||||
self.push_token(Token::RecursiveZeroOrMore)
|
||||
}
|
||||
_ => Err(self.error(ErrorKind::InvalidRecursive)),
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn parse_class(&mut self) -> Result<(), Error> {
|
||||
@@ -985,10 +959,6 @@ impl<'a> Parser<'a> {
|
||||
self.cur = self.chars.next();
|
||||
self.cur
|
||||
}
|
||||
|
||||
fn peek(&mut self) -> Option<char> {
|
||||
self.chars.peek().map(|&ch| ch)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -1174,6 +1144,13 @@ mod tests {
|
||||
syntax!(cls20, "[^a]", vec![classn('a', 'a')]);
|
||||
syntax!(cls21, "[^a-z]", vec![classn('a', 'z')]);
|
||||
|
||||
syntaxerr!(err_rseq1, "a**", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq2, "**a", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq3, "a**b", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq4, "***", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq5, "/a**", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq6, "/**a", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq7, "/a**b", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_unclosed1, "[", ErrorKind::UnclosedClass);
|
||||
syntaxerr!(err_unclosed2, "[]", ErrorKind::UnclosedClass);
|
||||
syntaxerr!(err_unclosed3, "[!", ErrorKind::UnclosedClass);
|
||||
@@ -1217,30 +1194,8 @@ mod tests {
|
||||
toregex!(re8, "[*]", r"^[\*]$");
|
||||
toregex!(re9, "[+]", r"^[\+]$");
|
||||
toregex!(re10, "+", r"^\+$");
|
||||
toregex!(re11, "☃", r"^\xe2\x98\x83$");
|
||||
toregex!(re12, "**", r"^.*$");
|
||||
toregex!(re13, "**/", r"^.*$");
|
||||
toregex!(re14, "**/*", r"^(?:/?|.*/).*$");
|
||||
toregex!(re15, "**/**", r"^.*$");
|
||||
toregex!(re16, "**/**/*", r"^(?:/?|.*/).*$");
|
||||
toregex!(re17, "**/**/**", r"^.*$");
|
||||
toregex!(re18, "**/**/**/*", r"^(?:/?|.*/).*$");
|
||||
toregex!(re19, "a/**", r"^a(?:/?|/.*)$");
|
||||
toregex!(re20, "a/**/**", r"^a(?:/?|/.*)$");
|
||||
toregex!(re21, "a/**/**/**", r"^a(?:/?|/.*)$");
|
||||
toregex!(re22, "a/**/b", r"^a(?:/|/.*/)b$");
|
||||
toregex!(re23, "a/**/**/b", r"^a(?:/|/.*/)b$");
|
||||
toregex!(re24, "a/**/**/**/b", r"^a(?:/|/.*/)b$");
|
||||
toregex!(re25, "**/b", r"^(?:/?|.*/)b$");
|
||||
toregex!(re26, "**/**/b", r"^(?:/?|.*/)b$");
|
||||
toregex!(re27, "**/**/**/b", r"^(?:/?|.*/)b$");
|
||||
toregex!(re28, "a**", r"^a.*.*$");
|
||||
toregex!(re29, "**a", r"^.*.*a$");
|
||||
toregex!(re30, "a**b", r"^a.*.*b$");
|
||||
toregex!(re31, "***", r"^.*.*.*$");
|
||||
toregex!(re32, "/a**", r"^/a.*.*$");
|
||||
toregex!(re33, "/**a", r"^/.*.*a$");
|
||||
toregex!(re34, "/a**b", r"^/a.*.*b$");
|
||||
toregex!(re11, "**", r"^.*$");
|
||||
toregex!(re12, "☃", r"^\xe2\x98\x83$");
|
||||
|
||||
matches!(match1, "a", "a");
|
||||
matches!(match2, "a*b", "a_b");
|
||||
|
@@ -104,25 +104,27 @@ or to enable case insensitive matching.
|
||||
#![deny(missing_docs)]
|
||||
|
||||
extern crate aho_corasick;
|
||||
extern crate bstr;
|
||||
extern crate fnv;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate memchr;
|
||||
extern crate regex;
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::error::Error as StdError;
|
||||
use std::ffi::OsStr;
|
||||
use std::fmt;
|
||||
use std::hash;
|
||||
use std::path::Path;
|
||||
use std::str;
|
||||
|
||||
use aho_corasick::AhoCorasick;
|
||||
use bstr::{B, BStr, BString};
|
||||
use aho_corasick::{Automaton, AcAutomaton, FullAcAutomaton};
|
||||
use regex::bytes::{Regex, RegexBuilder, RegexSet};
|
||||
|
||||
use pathutil::{file_name, file_name_ext, normalize_path};
|
||||
use pathutil::{
|
||||
file_name, file_name_ext, normalize_path, os_str_bytes, path_bytes,
|
||||
};
|
||||
use glob::MatchStrategy;
|
||||
pub use glob::{Glob, GlobBuilder, GlobMatcher};
|
||||
|
||||
@@ -141,13 +143,8 @@ pub struct Error {
|
||||
/// The kind of error that can occur when parsing a glob pattern.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum ErrorKind {
|
||||
/// **DEPRECATED**.
|
||||
///
|
||||
/// This error used to occur for consistency with git's glob specification,
|
||||
/// but the specification now accepts all uses of `**`. When `**` does not
|
||||
/// appear adjacent to a path separator or at the beginning/end of a glob,
|
||||
/// it is now treated as two consecutive `*` patterns. As such, this error
|
||||
/// is no longer used.
|
||||
/// Occurs when a use of `**` is invalid. Namely, `**` can only appear
|
||||
/// adjacent to a path separator, or the beginning/end of a glob.
|
||||
InvalidRecursive,
|
||||
/// Occurs when a character class (e.g., `[abc]`) is not closed.
|
||||
UnclosedClass,
|
||||
@@ -292,7 +289,6 @@ pub struct GlobSet {
|
||||
|
||||
impl GlobSet {
|
||||
/// Create an empty `GlobSet`. An empty set matches nothing.
|
||||
#[inline]
|
||||
pub fn empty() -> GlobSet {
|
||||
GlobSet {
|
||||
len: 0,
|
||||
@@ -301,13 +297,11 @@ impl GlobSet {
|
||||
}
|
||||
|
||||
/// Returns true if this set is empty, and therefore matches nothing.
|
||||
#[inline]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len == 0
|
||||
}
|
||||
|
||||
/// Returns the number of globs in this set.
|
||||
#[inline]
|
||||
pub fn len(&self) -> usize {
|
||||
self.len
|
||||
}
|
||||
@@ -490,25 +484,24 @@ impl GlobSetBuilder {
|
||||
/// path against multiple globs or sets of globs.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Candidate<'a> {
|
||||
path: Cow<'a, BStr>,
|
||||
basename: Cow<'a, BStr>,
|
||||
ext: Cow<'a, BStr>,
|
||||
path: Cow<'a, [u8]>,
|
||||
basename: Cow<'a, [u8]>,
|
||||
ext: Cow<'a, [u8]>,
|
||||
}
|
||||
|
||||
impl<'a> Candidate<'a> {
|
||||
/// Create a new candidate for matching from the given path.
|
||||
pub fn new<P: AsRef<Path> + ?Sized>(path: &'a P) -> Candidate<'a> {
|
||||
let path = normalize_path(BString::from_path_lossy(path.as_ref()));
|
||||
let basename = file_name(&path).unwrap_or(Cow::Borrowed(B("")));
|
||||
let ext = file_name_ext(&basename).unwrap_or(Cow::Borrowed(B("")));
|
||||
let path = path.as_ref();
|
||||
let basename = file_name(path).unwrap_or(OsStr::new(""));
|
||||
Candidate {
|
||||
path: path,
|
||||
basename: basename,
|
||||
ext: ext,
|
||||
path: normalize_path(path_bytes(path)),
|
||||
basename: os_str_bytes(basename),
|
||||
ext: file_name_ext(basename).unwrap_or(Cow::Borrowed(b"")),
|
||||
}
|
||||
}
|
||||
|
||||
fn path_prefix(&self, max: usize) -> &BStr {
|
||||
fn path_prefix(&self, max: usize) -> &[u8] {
|
||||
if self.path.len() <= max {
|
||||
&*self.path
|
||||
} else {
|
||||
@@ -516,7 +509,7 @@ impl<'a> Candidate<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
fn path_suffix(&self, max: usize) -> &BStr {
|
||||
fn path_suffix(&self, max: usize) -> &[u8] {
|
||||
if self.path.len() <= max {
|
||||
&*self.path
|
||||
} else {
|
||||
@@ -577,12 +570,12 @@ impl LiteralStrategy {
|
||||
}
|
||||
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
self.0.contains_key(candidate.path.as_bytes())
|
||||
self.0.contains_key(&*candidate.path)
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
if let Some(hits) = self.0.get(candidate.path.as_bytes()) {
|
||||
if let Some(hits) = self.0.get(&*candidate.path) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
}
|
||||
@@ -604,7 +597,7 @@ impl BasenameLiteralStrategy {
|
||||
if candidate.basename.is_empty() {
|
||||
return false;
|
||||
}
|
||||
self.0.contains_key(candidate.basename.as_bytes())
|
||||
self.0.contains_key(&*candidate.basename)
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
@@ -612,7 +605,7 @@ impl BasenameLiteralStrategy {
|
||||
if candidate.basename.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(hits) = self.0.get(candidate.basename.as_bytes()) {
|
||||
if let Some(hits) = self.0.get(&*candidate.basename) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
}
|
||||
@@ -634,7 +627,7 @@ impl ExtensionStrategy {
|
||||
if candidate.ext.is_empty() {
|
||||
return false;
|
||||
}
|
||||
self.0.contains_key(candidate.ext.as_bytes())
|
||||
self.0.contains_key(&*candidate.ext)
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
@@ -642,7 +635,7 @@ impl ExtensionStrategy {
|
||||
if candidate.ext.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(hits) = self.0.get(candidate.ext.as_bytes()) {
|
||||
if let Some(hits) = self.0.get(&*candidate.ext) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
}
|
||||
@@ -650,7 +643,7 @@ impl ExtensionStrategy {
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct PrefixStrategy {
|
||||
matcher: AhoCorasick,
|
||||
matcher: FullAcAutomaton<Vec<u8>>,
|
||||
map: Vec<usize>,
|
||||
longest: usize,
|
||||
}
|
||||
@@ -658,8 +651,8 @@ struct PrefixStrategy {
|
||||
impl PrefixStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
let path = candidate.path_prefix(self.longest);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.start() == 0 {
|
||||
for m in self.matcher.find_overlapping(path) {
|
||||
if m.start == 0 {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -668,9 +661,9 @@ impl PrefixStrategy {
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
let path = candidate.path_prefix(self.longest);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.start() == 0 {
|
||||
matches.push(self.map[m.pattern()]);
|
||||
for m in self.matcher.find_overlapping(path) {
|
||||
if m.start == 0 {
|
||||
matches.push(self.map[m.pati]);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -678,7 +671,7 @@ impl PrefixStrategy {
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct SuffixStrategy {
|
||||
matcher: AhoCorasick,
|
||||
matcher: FullAcAutomaton<Vec<u8>>,
|
||||
map: Vec<usize>,
|
||||
longest: usize,
|
||||
}
|
||||
@@ -686,8 +679,8 @@ struct SuffixStrategy {
|
||||
impl SuffixStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
let path = candidate.path_suffix(self.longest);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.end() == path.len() {
|
||||
for m in self.matcher.find_overlapping(path) {
|
||||
if m.end == path.len() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -696,9 +689,9 @@ impl SuffixStrategy {
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
let path = candidate.path_suffix(self.longest);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.end() == path.len() {
|
||||
matches.push(self.map[m.pattern()]);
|
||||
for m in self.matcher.find_overlapping(path) {
|
||||
if m.end == path.len() {
|
||||
matches.push(self.map[m.pati]);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -712,11 +705,11 @@ impl RequiredExtensionStrategy {
|
||||
if candidate.ext.is_empty() {
|
||||
return false;
|
||||
}
|
||||
match self.0.get(candidate.ext.as_bytes()) {
|
||||
match self.0.get(&*candidate.ext) {
|
||||
None => false,
|
||||
Some(regexes) => {
|
||||
for &(_, ref re) in regexes {
|
||||
if re.is_match(candidate.path.as_bytes()) {
|
||||
if re.is_match(&*candidate.path) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -730,9 +723,9 @@ impl RequiredExtensionStrategy {
|
||||
if candidate.ext.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(regexes) = self.0.get(candidate.ext.as_bytes()) {
|
||||
if let Some(regexes) = self.0.get(&*candidate.ext) {
|
||||
for &(global_index, ref re) in regexes {
|
||||
if re.is_match(candidate.path.as_bytes()) {
|
||||
if re.is_match(&*candidate.path) {
|
||||
matches.push(global_index);
|
||||
}
|
||||
}
|
||||
@@ -748,11 +741,11 @@ struct RegexSetStrategy {
|
||||
|
||||
impl RegexSetStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
self.matcher.is_match(candidate.path.as_bytes())
|
||||
self.matcher.is_match(&*candidate.path)
|
||||
}
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
for i in self.matcher.matches(candidate.path.as_bytes()) {
|
||||
for i in self.matcher.matches(&*candidate.path) {
|
||||
matches.push(self.map[i]);
|
||||
}
|
||||
}
|
||||
@@ -783,16 +776,18 @@ impl MultiStrategyBuilder {
|
||||
}
|
||||
|
||||
fn prefix(self) -> PrefixStrategy {
|
||||
let it = self.literals.into_iter().map(|s| s.into_bytes());
|
||||
PrefixStrategy {
|
||||
matcher: AhoCorasick::new_auto_configured(&self.literals),
|
||||
matcher: AcAutomaton::new(it).into_full(),
|
||||
map: self.map,
|
||||
longest: self.longest,
|
||||
}
|
||||
}
|
||||
|
||||
fn suffix(self) -> SuffixStrategy {
|
||||
let it = self.literals.into_iter().map(|s| s.into_bytes());
|
||||
SuffixStrategy {
|
||||
matcher: AhoCorasick::new_auto_configured(&self.literals),
|
||||
matcher: AcAutomaton::new(it).into_full(),
|
||||
map: self.map,
|
||||
longest: self.longest,
|
||||
}
|
||||
|
@@ -1,26 +1,41 @@
|
||||
use std::borrow::Cow;
|
||||
|
||||
use bstr::BStr;
|
||||
use std::ffi::OsStr;
|
||||
use std::path::Path;
|
||||
|
||||
/// The final component of the path, if it is a normal file.
|
||||
///
|
||||
/// If the path terminates in ., .., or consists solely of a root of prefix,
|
||||
/// file_name will return None.
|
||||
pub fn file_name<'a>(path: &Cow<'a, BStr>) -> Option<Cow<'a, BStr>> {
|
||||
#[cfg(unix)]
|
||||
pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
|
||||
path: &'a P,
|
||||
) -> Option<&'a OsStr> {
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use memchr::memrchr;
|
||||
|
||||
let path = path.as_ref().as_os_str().as_bytes();
|
||||
if path.is_empty() {
|
||||
return None;
|
||||
} else if path.last() == Some(b'.') {
|
||||
} else if path.len() == 1 && path[0] == b'.' {
|
||||
return None;
|
||||
} else if path.last() == Some(&b'.') {
|
||||
return None;
|
||||
} else if path.len() >= 2 && &path[path.len() - 2..] == &b".."[..] {
|
||||
return None;
|
||||
}
|
||||
let last_slash = path.rfind_byte(b'/').map(|i| i + 1).unwrap_or(0);
|
||||
Some(match *path {
|
||||
Cow::Borrowed(path) => Cow::Borrowed(&path[last_slash..]),
|
||||
Cow::Owned(ref path) => {
|
||||
let mut path = path.clone();
|
||||
path.drain_bytes(..last_slash);
|
||||
Cow::Owned(path)
|
||||
}
|
||||
})
|
||||
let last_slash = memrchr(b'/', path).map(|i| i + 1).unwrap_or(0);
|
||||
Some(OsStr::from_bytes(&path[last_slash..]))
|
||||
}
|
||||
|
||||
/// The final component of the path, if it is a normal file.
|
||||
///
|
||||
/// If the path terminates in ., .., or consists solely of a root of prefix,
|
||||
/// file_name will return None.
|
||||
#[cfg(not(unix))]
|
||||
pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
|
||||
path: &'a P,
|
||||
) -> Option<&'a OsStr> {
|
||||
path.as_ref().file_name()
|
||||
}
|
||||
|
||||
/// Return a file extension given a path's file name.
|
||||
@@ -39,28 +54,59 @@ pub fn file_name<'a>(path: &Cow<'a, BStr>) -> Option<Cow<'a, BStr>> {
|
||||
/// a pattern like `*.rs` is obviously trying to match files with a `rs`
|
||||
/// extension, but it also matches files like `.rs`, which doesn't have an
|
||||
/// extension according to std::path::Path::extension.
|
||||
pub fn file_name_ext<'a>(name: &Cow<'a, BStr>) -> Option<Cow<'a, BStr>> {
|
||||
pub fn file_name_ext(name: &OsStr) -> Option<Cow<[u8]>> {
|
||||
if name.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let last_dot_at = match name.rfind_byte(b'.') {
|
||||
None => return None,
|
||||
Some(i) => i,
|
||||
let name = os_str_bytes(name);
|
||||
let last_dot_at = {
|
||||
let result = name
|
||||
.iter().enumerate().rev()
|
||||
.find(|&(_, &b)| b == b'.')
|
||||
.map(|(i, _)| i);
|
||||
match result {
|
||||
None => return None,
|
||||
Some(i) => i,
|
||||
}
|
||||
};
|
||||
Some(match *name {
|
||||
Some(match name {
|
||||
Cow::Borrowed(name) => Cow::Borrowed(&name[last_dot_at..]),
|
||||
Cow::Owned(ref name) => {
|
||||
let mut name = name.clone();
|
||||
name.drain_bytes(..last_dot_at);
|
||||
Cow::Owned(mut name) => {
|
||||
name.drain(..last_dot_at);
|
||||
Cow::Owned(name)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Return raw bytes of a path, transcoded to UTF-8 if necessary.
|
||||
pub fn path_bytes(path: &Path) -> Cow<[u8]> {
|
||||
os_str_bytes(path.as_os_str())
|
||||
}
|
||||
|
||||
/// Return the raw bytes of the given OS string, possibly transcoded to UTF-8.
|
||||
#[cfg(unix)]
|
||||
pub fn os_str_bytes(s: &OsStr) -> Cow<[u8]> {
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
Cow::Borrowed(s.as_bytes())
|
||||
}
|
||||
|
||||
/// Return the raw bytes of the given OS string, possibly transcoded to UTF-8.
|
||||
#[cfg(not(unix))]
|
||||
pub fn os_str_bytes(s: &OsStr) -> Cow<[u8]> {
|
||||
// TODO(burntsushi): On Windows, OS strings are WTF-8, which is a superset
|
||||
// of UTF-8, so even if we could get at the raw bytes, they wouldn't
|
||||
// be useful. We *must* convert to UTF-8 before doing path matching.
|
||||
// Unfortunate, but necessary.
|
||||
match s.to_string_lossy() {
|
||||
Cow::Owned(s) => Cow::Owned(s.into_bytes()),
|
||||
Cow::Borrowed(s) => Cow::Borrowed(s.as_bytes()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Normalizes a path to use `/` as a separator everywhere, even on platforms
|
||||
/// that recognize other characters as separators.
|
||||
#[cfg(unix)]
|
||||
pub fn normalize_path(path: Cow<BStr>) -> Cow<BStr> {
|
||||
pub fn normalize_path(path: Cow<[u8]>) -> Cow<[u8]> {
|
||||
// UNIX only uses /, so we're good.
|
||||
path
|
||||
}
|
||||
@@ -68,7 +114,7 @@ pub fn normalize_path(path: Cow<BStr>) -> Cow<BStr> {
|
||||
/// Normalizes a path to use `/` as a separator everywhere, even on platforms
|
||||
/// that recognize other characters as separators.
|
||||
#[cfg(not(unix))]
|
||||
pub fn normalize_path(mut path: Cow<BStr>) -> Cow<BStr> {
|
||||
pub fn normalize_path(mut path: Cow<[u8]>) -> Cow<[u8]> {
|
||||
use std::path::is_separator;
|
||||
|
||||
for i in 0..path.len() {
|
||||
@@ -83,8 +129,7 @@ pub fn normalize_path(mut path: Cow<BStr>) -> Cow<BStr> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::borrow::Cow;
|
||||
|
||||
use bstr::{B, BString};
|
||||
use std::ffi::OsStr;
|
||||
|
||||
use super::{file_name_ext, normalize_path};
|
||||
|
||||
@@ -92,9 +137,8 @@ mod tests {
|
||||
($name:ident, $file_name:expr, $ext:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let bs = BString::from($file_name);
|
||||
let got = file_name_ext(&Cow::Owned(bs));
|
||||
assert_eq!($ext.map(|s| Cow::Borrowed(B(s))), got);
|
||||
let got = file_name_ext(OsStr::new($file_name));
|
||||
assert_eq!($ext.map(|s| Cow::Borrowed(s.as_bytes())), got);
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -109,8 +153,7 @@ mod tests {
|
||||
($name:ident, $path:expr, $expected:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let bs = BString::from_slice($path);
|
||||
let got = normalize_path(Cow::Owned(bs));
|
||||
let got = normalize_path(Cow::Owned($path.to_vec()));
|
||||
assert_eq!($expected.to_vec(), got.into_owned());
|
||||
}
|
||||
};
|
||||
|
@@ -14,7 +14,6 @@ license = "Unlicense/MIT"
|
||||
|
||||
[dependencies]
|
||||
atty = "0.2.11"
|
||||
bstr = "0.1.2"
|
||||
globset = { version = "0.4.2", path = "../globset" }
|
||||
lazy_static = "1.1.0"
|
||||
log = "0.4.5"
|
||||
|
@@ -352,8 +352,6 @@ fn default_decompression_commands() -> Vec<DecompressionCommand> {
|
||||
const ARGS_XZ: &[&str] = &["xz", "-d", "-c"];
|
||||
const ARGS_LZ4: &[&str] = &["lz4", "-d", "-c"];
|
||||
const ARGS_LZMA: &[&str] = &["xz", "--format=lzma", "-d", "-c"];
|
||||
const ARGS_BROTLI: &[&str] = &["brotli", "-d", "-c"];
|
||||
const ARGS_ZSTD: &[&str] = &["zstd", "-q", "-d", "-c"];
|
||||
|
||||
fn cmd(glob: &str, args: &[&str]) -> DecompressionCommand {
|
||||
DecompressionCommand {
|
||||
@@ -369,14 +367,15 @@ fn default_decompression_commands() -> Vec<DecompressionCommand> {
|
||||
vec![
|
||||
cmd("*.gz", ARGS_GZIP),
|
||||
cmd("*.tgz", ARGS_GZIP),
|
||||
|
||||
cmd("*.bz2", ARGS_BZIP),
|
||||
cmd("*.tbz2", ARGS_BZIP),
|
||||
|
||||
cmd("*.xz", ARGS_XZ),
|
||||
cmd("*.txz", ARGS_XZ),
|
||||
|
||||
cmd("*.lz4", ARGS_LZ4),
|
||||
|
||||
cmd("*.lzma", ARGS_LZMA),
|
||||
cmd("*.br", ARGS_BROTLI),
|
||||
cmd("*.zst", ARGS_ZSTD),
|
||||
cmd("*.zstd", ARGS_ZSTD),
|
||||
]
|
||||
}
|
||||
|
@@ -1,8 +1,6 @@
|
||||
use std::ffi::OsStr;
|
||||
use std::str;
|
||||
|
||||
use bstr::{BStr, BString};
|
||||
|
||||
/// A single state in the state machine used by `unescape`.
|
||||
#[derive(Clone, Copy, Eq, PartialEq)]
|
||||
enum State {
|
||||
@@ -37,16 +35,18 @@ enum State {
|
||||
///
|
||||
/// assert_eq!(r"foo\nbar\xFFbaz", escape(b"foo\nbar\xFFbaz"));
|
||||
/// ```
|
||||
pub fn escape(bytes: &[u8]) -> String {
|
||||
let bytes = BStr::new(bytes);
|
||||
pub fn escape(mut bytes: &[u8]) -> String {
|
||||
let mut escaped = String::new();
|
||||
for (s, e, ch) in bytes.char_indices() {
|
||||
if ch == '\u{FFFD}' {
|
||||
for b in bytes[s..e].bytes() {
|
||||
escape_byte(b, &mut escaped);
|
||||
while let Some(result) = decode_utf8(bytes) {
|
||||
match result {
|
||||
Ok(cp) => {
|
||||
escape_char(cp, &mut escaped);
|
||||
bytes = &bytes[cp.len_utf8()..];
|
||||
}
|
||||
Err(byte) => {
|
||||
escape_byte(byte, &mut escaped);
|
||||
bytes = &bytes[1..];
|
||||
}
|
||||
} else {
|
||||
escape_char(ch, &mut escaped);
|
||||
}
|
||||
}
|
||||
escaped
|
||||
@@ -56,7 +56,19 @@ pub fn escape(bytes: &[u8]) -> String {
|
||||
///
|
||||
/// This is like [`escape`](fn.escape.html), but accepts an OS string.
|
||||
pub fn escape_os(string: &OsStr) -> String {
|
||||
escape(BString::from_os_str_lossy(string).as_bytes())
|
||||
#[cfg(unix)]
|
||||
fn imp(string: &OsStr) -> String {
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
escape(string.as_bytes())
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn imp(string: &OsStr) -> String {
|
||||
escape(string.to_string_lossy().as_bytes())
|
||||
}
|
||||
|
||||
imp(string)
|
||||
}
|
||||
|
||||
/// Unescapes a string.
|
||||
@@ -183,6 +195,46 @@ fn escape_byte(byte: u8, into: &mut String) {
|
||||
}
|
||||
}
|
||||
|
||||
/// Decodes the next UTF-8 encoded codepoint from the given byte slice.
|
||||
///
|
||||
/// If no valid encoding of a codepoint exists at the beginning of the given
|
||||
/// byte slice, then the first byte is returned instead.
|
||||
///
|
||||
/// This returns `None` if and only if `bytes` is empty.
|
||||
fn decode_utf8(bytes: &[u8]) -> Option<Result<char, u8>> {
|
||||
if bytes.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let len = match utf8_len(bytes[0]) {
|
||||
None => return Some(Err(bytes[0])),
|
||||
Some(len) if len > bytes.len() => return Some(Err(bytes[0])),
|
||||
Some(len) => len,
|
||||
};
|
||||
match str::from_utf8(&bytes[..len]) {
|
||||
Ok(s) => Some(Ok(s.chars().next().unwrap())),
|
||||
Err(_) => Some(Err(bytes[0])),
|
||||
}
|
||||
}
|
||||
|
||||
/// Given a UTF-8 leading byte, this returns the total number of code units
|
||||
/// in the following encoded codepoint.
|
||||
///
|
||||
/// If the given byte is not a valid UTF-8 leading byte, then this returns
|
||||
/// `None`.
|
||||
fn utf8_len(byte: u8) -> Option<usize> {
|
||||
if byte <= 0x7F {
|
||||
Some(1)
|
||||
} else if byte <= 0b110_11111 {
|
||||
Some(2)
|
||||
} else if byte <= 0b1110_1111 {
|
||||
Some(3)
|
||||
} else if byte <= 0b1111_0111 {
|
||||
Some(4)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
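// A hedged illustration of the two helpers above (not part of this diff);
// these asserts could live in the test module below. `utf8_len` only inspects
// the leading byte, so invalid sequences are caught later by `str::from_utf8`
// inside `decode_utf8`.
//
//     assert_eq!(utf8_len(b'a'), Some(1));
//     assert_eq!(utf8_len(0xE2), Some(3));  // leading byte of a 3-byte sequence
//     assert_eq!(utf8_len(0xFF), None);     // never a valid leading byte
//     assert_eq!(decode_utf8("☃ snow".as_bytes()), Some(Ok('☃')));
//     assert_eq!(decode_utf8(b"\xFFabc"), Some(Err(0xFF)));
//     assert_eq!(decode_utf8(b""), None);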
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{escape, unescape};
|
||||
|
@@ -159,7 +159,6 @@ error message is crafted that typically tells the user how to fix the problem.
|
||||
#![deny(missing_docs)]
|
||||
|
||||
extern crate atty;
|
||||
extern crate bstr;
|
||||
extern crate globset;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
@@ -13,8 +13,11 @@ keywords = ["regex", "pattern", "trait"]
|
||||
license = "Unlicense/MIT"
|
||||
autotests = false
|
||||
|
||||
[dependencies]
|
||||
memchr = "2.1"
|
||||
[dependencies.bstr]
|
||||
version = "*"
|
||||
path = "/home/andrew/rust/bstr"
|
||||
default-features = false
|
||||
features = ["std"]
|
||||
|
||||
[dev-dependencies]
|
||||
regex = "1.1"
|
||||
|
@@ -1,6 +1,6 @@
|
||||
use std::str;
|
||||
|
||||
use memchr::memchr;
|
||||
use bstr::B;
|
||||
|
||||
/// Interpolate capture references in `replacement` and write the interpolation
|
||||
/// result to `dst`. References in `replacement` take the form of $N or $name,
|
||||
@@ -22,7 +22,7 @@ pub fn interpolate<A, N>(
|
||||
N: FnMut(&str) -> Option<usize>
|
||||
{
|
||||
while !replacement.is_empty() {
|
||||
match memchr(b'$', replacement) {
|
||||
match B(replacement).find_byte(b'$') {
|
||||
None => break,
|
||||
Some(i) => {
|
||||
dst.extend(&replacement[..i]);
|
||||
|
@@ -38,13 +38,15 @@ implementations.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
extern crate memchr;
|
||||
extern crate bstr;
|
||||
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::ops;
|
||||
use std::u64;
|
||||
|
||||
use bstr::BStr;
|
||||
|
||||
use interpolate::interpolate;
|
||||
|
||||
mod interpolate;
|
||||
@@ -180,6 +182,22 @@ impl ops::IndexMut<Match> for [u8] {
|
||||
}
|
||||
}
|
||||
|
||||
impl ops::Index<Match> for BStr {
|
||||
type Output = BStr;
|
||||
|
||||
#[inline]
|
||||
fn index(&self, index: Match) -> &BStr {
|
||||
&self[index.start..index.end]
|
||||
}
|
||||
}
|
||||
|
||||
impl ops::IndexMut<Match> for BStr {
|
||||
#[inline]
|
||||
fn index_mut(&mut self, index: Match) -> &mut BStr {
|
||||
&mut self[index.start..index.end]
|
||||
}
|
||||
}
|
||||
|
||||
impl ops::Index<Match> for str {
|
||||
type Output = str;
|
||||
|
||||
|
@@ -19,7 +19,6 @@ serde1 = ["base64", "serde", "serde_derive", "serde_json"]
|
||||
|
||||
[dependencies]
|
||||
base64 = { version = "0.10.0", optional = true }
|
||||
bstr = "0.1.2"
|
||||
grep-matcher = { version = "0.1.1", path = "../grep-matcher" }
|
||||
grep-searcher = { version = "0.1.1", path = "../grep-searcher" }
|
||||
termcolor = "1.0.4"
|
||||
|
@@ -817,8 +817,7 @@ impl<'a> SubMatches<'a> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use grep_regex::{RegexMatcher, RegexMatcherBuilder};
|
||||
use grep_matcher::LineTerminator;
|
||||
use grep_regex::RegexMatcher;
|
||||
use grep_searcher::SearcherBuilder;
|
||||
|
||||
use super::{JSON, JSONBuilder};
|
||||
@@ -919,45 +918,4 @@ and exhibited clearly, with a label attached.\
|
||||
assert_eq!(got.lines().count(), 2);
|
||||
assert!(got.contains("begin") && got.contains("end"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn missing_crlf() {
|
||||
let haystack = "test\r\n".as_bytes();
|
||||
|
||||
let matcher = RegexMatcherBuilder::new()
|
||||
.build("test")
|
||||
.unwrap();
|
||||
let mut printer = JSONBuilder::new()
|
||||
.build(vec![]);
|
||||
SearcherBuilder::new()
|
||||
.build()
|
||||
.search_reader(&matcher, haystack, printer.sink(&matcher))
|
||||
.unwrap();
|
||||
let got = printer_contents(&mut printer);
|
||||
assert_eq!(got.lines().count(), 3);
|
||||
assert!(
|
||||
got.lines().nth(1).unwrap().contains(r"test\r\n"),
|
||||
r"missing 'test\r\n' in '{}'",
|
||||
got.lines().nth(1).unwrap(),
|
||||
);
|
||||
|
||||
let matcher = RegexMatcherBuilder::new()
|
||||
.crlf(true)
|
||||
.build("test")
|
||||
.unwrap();
|
||||
let mut printer = JSONBuilder::new()
|
||||
.build(vec![]);
|
||||
SearcherBuilder::new()
|
||||
.line_terminator(LineTerminator::crlf())
|
||||
.build()
|
||||
.search_reader(&matcher, haystack, printer.sink(&matcher))
|
||||
.unwrap();
|
||||
let got = printer_contents(&mut printer);
|
||||
assert_eq!(got.lines().count(), 3);
|
||||
assert!(
|
||||
got.lines().nth(1).unwrap().contains(r"test\r\n"),
|
||||
r"missing 'test\r\n' in '{}'",
|
||||
got.lines().nth(1).unwrap(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@@ -70,7 +70,6 @@ fn example() -> Result<(), Box<Error>> {

#[cfg(feature = "serde1")]
extern crate base64;
extern crate bstr;
extern crate grep_matcher;
#[cfg(test)]
extern crate grep_regex;
@@ -403,7 +403,7 @@ impl<W: WriteColor> Summary<W> {
    where M: Matcher,
          P: ?Sized + AsRef<Path>,
    {
        if !self.config.path && !self.config.kind.requires_path() {
        if !self.config.path {
            return self.sink(matcher);
        }
        let stats =

@@ -477,10 +477,7 @@ impl<'p, 's, M: Matcher, W: WriteColor> SummarySink<'p, 's, M, W> {
    /// This is unaffected by the result of searches before the previous
    /// search.
    pub fn has_match(&self) -> bool {
        match self.summary.config.kind {
            SummaryKind::PathWithoutMatch => self.match_count == 0,
            _ => self.match_count > 0,
        }
        self.match_count > 0
    }

    /// If binary data was found in the previous search, this returns the
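The interesting part of the `has_match` hunk above is the inverted summary kind: for a "path without match" summary (think `--files-without-match`), the sink reports a hit precisely when nothing matched. A standalone sketch of that decision (the enum and function here are illustrative, not the grep-printer API):

```rust
// How a summary printer can decide whether to emit a path, mirroring the
// `has_match` dispatch above. For PathWithoutMatch the condition is inverted.
#[derive(Clone, Copy, Debug)]
enum SummaryKind {
    Count,
    Path,
    PathWithoutMatch,
}

fn should_report(kind: SummaryKind, match_count: u64) -> bool {
    match kind {
        SummaryKind::PathWithoutMatch => match_count == 0,
        _ => match_count > 0,
    }
}

fn main() {
    assert!(should_report(SummaryKind::Path, 3));
    assert!(!should_report(SummaryKind::Path, 0));
    assert!(should_report(SummaryKind::PathWithoutMatch, 0));
    assert!(!should_report(SummaryKind::PathWithoutMatch, 3));
}
```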
@@ -4,7 +4,6 @@ use std::io;
use std::path::Path;
use std::time;

use bstr::{BStr, BString};
use grep_matcher::{Captures, LineTerminator, Match, Matcher};
use grep_searcher::{
    LineIter,

@@ -263,12 +262,26 @@ impl<'a> Sunk<'a> {
/// portability with a small cost: on Windows, paths that are not valid UTF-16
/// will not roundtrip correctly.
#[derive(Clone, Debug)]
pub struct PrinterPath<'a>(Cow<'a, BStr>);
pub struct PrinterPath<'a>(Cow<'a, [u8]>);

impl<'a> PrinterPath<'a> {
    /// Create a new path suitable for printing.
    pub fn new(path: &'a Path) -> PrinterPath<'a> {
        PrinterPath(BString::from_path_lossy(path))
        PrinterPath::new_impl(path)
    }

    #[cfg(unix)]
    fn new_impl(path: &'a Path) -> PrinterPath<'a> {
        use std::os::unix::ffi::OsStrExt;
        PrinterPath(Cow::Borrowed(path.as_os_str().as_bytes()))
    }

    #[cfg(not(unix))]
    fn new_impl(path: &'a Path) -> PrinterPath<'a> {
        PrinterPath(match path.to_string_lossy() {
            Cow::Owned(path) => Cow::Owned(path.into_bytes()),
            Cow::Borrowed(path) => Cow::Borrowed(path.as_bytes()),
        })
    }

    /// Create a new printer path from the given path which can be efficiently

@@ -289,7 +302,7 @@ impl<'a> PrinterPath<'a> {
    /// path separators that are both replaced by `new_sep`. In all other
    /// environments, only `/` is treated as a path separator.
    fn replace_separator(&mut self, new_sep: u8) {
        let transformed_path: BString = self.0.bytes().map(|b| {
        let transformed_path: Vec<_> = self.as_bytes().iter().map(|&b| {
            if b == b'/' || (cfg!(windows) && b == b'\\') {
                new_sep
            } else {

@@ -301,7 +314,7 @@ impl<'a> PrinterPath<'a> {

    /// Return the raw bytes for this path.
    pub fn as_bytes(&self) -> &[u8] {
        self.0.as_bytes()
        &*self.0
    }
}
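The `PrinterPath` change above is about getting printable bytes out of a `Path` cheaply: on Unix a path already is bytes and can be borrowed for free, elsewhere it goes through a lossy UTF-8 conversion. A minimal std-only sketch of that strategy (it mirrors the idea only, not the grep-printer type):

```rust
use std::borrow::Cow;
use std::path::Path;

// On Unix, borrow the raw OsStr bytes directly.
#[cfg(unix)]
fn path_bytes(path: &Path) -> Cow<'_, [u8]> {
    use std::os::unix::ffi::OsStrExt;
    Cow::Borrowed(path.as_os_str().as_bytes())
}

// Elsewhere, convert lossily through UTF-8.
#[cfg(not(unix))]
fn path_bytes(path: &Path) -> Cow<'_, [u8]> {
    match path.to_string_lossy() {
        Cow::Borrowed(s) => Cow::Borrowed(s.as_bytes()),
        Cow::Owned(s) => Cow::Owned(s.into_bytes()),
    }
}

fn main() {
    let p = Path::new("src/main.rs");
    // Replace '/' with another separator byte, as replace_separator does.
    let with_sep: Vec<u8> = path_bytes(p)
        .iter()
        .map(|&b| if b == b'/' { b'.' } else { b })
        .collect();
    assert_eq!(with_sep, b"src.main.rs".to_vec());
}
```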
@@ -1,6 +1,6 @@
[package]
name = "grep-regex"
version = "0.1.2" #:version
version = "0.1.1" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
Use Rust's regex library with the 'grep' crate.

@@ -16,6 +16,6 @@ license = "Unlicense/MIT"
log = "0.4.5"
grep-matcher = { version = "0.1.1", path = "../grep-matcher" }
regex = "1.1"
regex-syntax = "0.6.5"
regex-syntax = "0.6.4"
thread_local = "0.3.6"
utf8-ranges = "1.0.1"
@@ -160,14 +160,6 @@ impl ConfiguredHIR {
        non_matching_bytes(&self.expr)
    }

    /// Returns true if and only if this regex needs to have its match offsets
    /// tweaked because of CRLF support. Specifically, this occurs when the
    /// CRLF hack is enabled and the regex is line anchored at the end. In
    /// this case, matches that end with a `\r` have the `\r` stripped.
    pub fn needs_crlf_stripped(&self) -> bool {
        self.config.crlf && self.expr.is_line_anchored_end()
    }

    /// Builds a regular expression from this HIR expression.
    pub fn regex(&self) -> Result<Regex, Error> {
        self.pattern_to_regex(&self.expr.to_string())

@@ -207,7 +199,7 @@ impl ConfiguredHIR {
        if self.config.line_terminator.is_none() {
            return Ok(None);
        }
        match LiteralSets::new(&self.expr).one_regex(self.config.word) {
        match LiteralSets::new(&self.expr).one_regex() {
            None => Ok(None),
            Some(pattern) => self.pattern_to_regex(&pattern).map(Some),
        }
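The "CRLF hack" referenced by `needs_crlf_stripped` above comes down to this: in multi-line mode `$` only matches before `\n`, so on CRLF data an end-anchored pattern misses the line unless `$` is rewritten to accept an optional `\r`, and any `\r` that then sneaks into the match must be stripped afterwards. A hedged sketch of both halves, using the `regex` crate directly (a dependency of grep-regex); the pattern and haystack are illustrative:

```rust
use regex::bytes::Regex;

fn main() {
    let hay = b"foo\r\n";

    // Plain multi-line `$`: no match on CRLF-terminated data.
    assert!(Regex::new(r"(?m)foo$").unwrap().find(hay).is_none());

    // CRLF-hacked pattern: matches, but the match drags the `\r` along.
    let re = Regex::new(r"(?m)foo(?:\r?$)").unwrap();
    let m = re.find(hay).unwrap();
    assert_eq!(&hay[m.start()..m.end()], b"foo\r");

    // Strip the trailing `\r`, which is what the CRLF matcher's
    // offset tweaking takes care of in the real code.
    let end = if hay.get(m.end() - 1) == Some(&b'\r') {
        m.end() - 1
    } else {
        m.end()
    };
    assert_eq!(&hay[m.start()..end], b"foo");
}
```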
@@ -1,110 +1,5 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use grep_matcher::{Match, Matcher, NoError};
|
||||
use regex::bytes::Regex;
|
||||
use regex_syntax::hir::{self, Hir, HirKind};
|
||||
|
||||
use config::ConfiguredHIR;
|
||||
use error::Error;
|
||||
use matcher::RegexCaptures;
|
||||
|
||||
/// A matcher for implementing "word match" semantics.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct CRLFMatcher {
|
||||
/// The regex.
|
||||
regex: Regex,
|
||||
/// A map from capture group name to capture group index.
|
||||
names: HashMap<String, usize>,
|
||||
}
|
||||
|
||||
impl CRLFMatcher {
|
||||
/// Create a new matcher from the given pattern that strips `\r` from the
|
||||
/// end of every match.
|
||||
///
|
||||
/// This panics if the given expression doesn't need its CRLF stripped.
|
||||
pub fn new(expr: &ConfiguredHIR) -> Result<CRLFMatcher, Error> {
|
||||
assert!(expr.needs_crlf_stripped());
|
||||
|
||||
let regex = expr.regex()?;
|
||||
let mut names = HashMap::new();
|
||||
for (i, optional_name) in regex.capture_names().enumerate() {
|
||||
if let Some(name) = optional_name {
|
||||
names.insert(name.to_string(), i.checked_sub(1).unwrap());
|
||||
}
|
||||
}
|
||||
Ok(CRLFMatcher { regex, names })
|
||||
}
|
||||
|
||||
/// Return the underlying regex used by this matcher.
|
||||
pub fn regex(&self) -> &Regex {
|
||||
&self.regex
|
||||
}
|
||||
}
|
||||
|
||||
impl Matcher for CRLFMatcher {
|
||||
type Captures = RegexCaptures;
|
||||
type Error = NoError;
|
||||
|
||||
fn find_at(
|
||||
&self,
|
||||
haystack: &[u8],
|
||||
at: usize,
|
||||
) -> Result<Option<Match>, NoError> {
|
||||
let m = match self.regex.find_at(haystack, at) {
|
||||
None => return Ok(None),
|
||||
Some(m) => Match::new(m.start(), m.end()),
|
||||
};
|
||||
Ok(Some(adjust_match(haystack, m)))
|
||||
}
|
||||
|
||||
fn new_captures(&self) -> Result<RegexCaptures, NoError> {
|
||||
Ok(RegexCaptures::new(self.regex.capture_locations()))
|
||||
}
|
||||
|
||||
fn capture_count(&self) -> usize {
|
||||
self.regex.captures_len().checked_sub(1).unwrap()
|
||||
}
|
||||
|
||||
fn capture_index(&self, name: &str) -> Option<usize> {
|
||||
self.names.get(name).map(|i| *i)
|
||||
}
|
||||
|
||||
fn captures_at(
|
||||
&self,
|
||||
haystack: &[u8],
|
||||
at: usize,
|
||||
caps: &mut RegexCaptures,
|
||||
) -> Result<bool, NoError> {
|
||||
caps.strip_crlf(false);
|
||||
let r = self.regex.captures_read_at(caps.locations(), haystack, at);
|
||||
if !r.is_some() {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// If the end of our match includes a `\r`, then strip it from all
|
||||
// capture groups ending at the same location.
|
||||
let end = caps.locations().get(0).unwrap().1;
|
||||
if end > 0 && haystack.get(end - 1) == Some(&b'\r') {
|
||||
caps.strip_crlf(true);
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
// We specifically do not implement other methods like find_iter or
|
||||
// captures_iter. Namely, the iter methods are guaranteed to be correct
|
||||
// by virtue of implementing find_at and captures_at above.
|
||||
}
|
||||
|
||||
/// If the given match ends with a `\r`, then return a new match that ends
|
||||
/// immediately before the `\r`.
|
||||
pub fn adjust_match(haystack: &[u8], m: Match) -> Match {
|
||||
if m.end() > 0 && haystack.get(m.end() - 1) == Some(&b'\r') {
|
||||
m.with_end(m.end() - 1)
|
||||
} else {
|
||||
m
|
||||
}
|
||||
}
|
||||
|
||||
/// Substitutes all occurrences of multi-line enabled `$` with `(?:\r?$)`.
|
||||
///
|
||||
/// This does not preserve the exact semantics of the given expression,
|
||||
|
@@ -47,23 +47,18 @@ impl LiteralSets {
|
||||
/// generated these literal sets. The idea here is that the pattern
|
||||
/// returned by this method is much cheaper to search for. i.e., It is
|
||||
/// usually a single literal or an alternation of literals.
|
||||
pub fn one_regex(&self, word: bool) -> Option<String> {
|
||||
pub fn one_regex(&self) -> Option<String> {
|
||||
// TODO: The logic in this function is basically inscrutable. It grew
|
||||
// organically in the old grep 0.1 crate. Ideally, it would be
|
||||
// re-worked. In fact, the entire inner literal extraction should be
|
||||
// re-worked. Actually, most of regex-syntax's literal extraction
|
||||
// should also be re-worked. Alas... only so much time in the day.
|
||||
|
||||
if !word {
|
||||
if self.prefixes.all_complete() && !self.prefixes.is_empty() {
|
||||
debug!("literal prefixes detected: {:?}", self.prefixes);
|
||||
// When this is true, the regex engine will do a literal scan,
|
||||
// so we don't need to return anything. But we only do this
|
||||
// if we aren't doing a word regex, since a word regex adds
|
||||
// a `(?:\W|^)` to the beginning of the regex, thereby
|
||||
// defeating the regex engine's literal detection.
|
||||
return None;
|
||||
}
|
||||
if self.prefixes.all_complete() && !self.prefixes.is_empty() {
|
||||
debug!("literal prefixes detected: {:?}", self.prefixes);
|
||||
// When this is true, the regex engine will do a literal scan,
|
||||
// so we don't need to return anything.
|
||||
return None;
|
||||
}
|
||||
|
||||
// Out of inner required literals, prefixes and suffixes, which one
|
||||
@@ -290,7 +285,7 @@ mod tests {
|
||||
}
|
||||
|
||||
fn one_regex(pattern: &str) -> Option<String> {
|
||||
sets(pattern).one_regex(false)
|
||||
sets(pattern).one_regex()
|
||||
}
|
||||
|
||||
// Put a pattern into the same format as the one returned by `one_regex`.
|
||||
|
@@ -6,7 +6,6 @@ use grep_matcher::{
|
||||
use regex::bytes::{CaptureLocations, Regex};
|
||||
|
||||
use config::{Config, ConfiguredHIR};
|
||||
use crlf::CRLFMatcher;
|
||||
use error::Error;
|
||||
use word::WordMatcher;
|
||||
|
||||
@@ -50,12 +49,9 @@ impl RegexMatcherBuilder {
|
||||
if let Some(ref re) = fast_line_regex {
|
||||
trace!("extracted fast line regex: {:?}", re);
|
||||
}
|
||||
|
||||
let matcher = RegexMatcherImpl::new(&chir)?;
|
||||
trace!("final regex: {:?}", matcher.regex().to_string());
|
||||
Ok(RegexMatcher {
|
||||
config: self.config.clone(),
|
||||
matcher: matcher,
|
||||
matcher: RegexMatcherImpl::new(&chir)?,
|
||||
fast_line_regex: fast_line_regex,
|
||||
non_matching_bytes: non_matching_bytes,
|
||||
})
|
||||
@@ -348,11 +344,6 @@ impl RegexMatcher {
|
||||
enum RegexMatcherImpl {
|
||||
/// The standard matcher used for all regular expressions.
|
||||
Standard(StandardMatcher),
|
||||
/// A matcher that strips `\r` from the end of matches.
|
||||
///
|
||||
/// This is only used when the CRLF hack is enabled and the regex is line
|
||||
/// anchored at the end.
|
||||
CRLF(CRLFMatcher),
|
||||
/// A matcher that only matches at word boundaries. This transforms the
|
||||
/// regex to `(^|\W)(...)($|\W)` instead of the more intuitive `\b(...)\b`.
|
||||
/// Because of this, the WordMatcher provides its own implementation of
|
||||
@@ -367,21 +358,10 @@ impl RegexMatcherImpl {
|
||||
fn new(expr: &ConfiguredHIR) -> Result<RegexMatcherImpl, Error> {
|
||||
if expr.config().word {
|
||||
Ok(RegexMatcherImpl::Word(WordMatcher::new(expr)?))
|
||||
} else if expr.needs_crlf_stripped() {
|
||||
Ok(RegexMatcherImpl::CRLF(CRLFMatcher::new(expr)?))
|
||||
} else {
|
||||
Ok(RegexMatcherImpl::Standard(StandardMatcher::new(expr)?))
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the underlying regex object used.
|
||||
fn regex(&self) -> &Regex {
|
||||
match *self {
|
||||
RegexMatcherImpl::Word(ref x) => x.regex(),
|
||||
RegexMatcherImpl::CRLF(ref x) => x.regex(),
|
||||
RegexMatcherImpl::Standard(ref x) => &x.regex,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This implementation just dispatches on the internal matcher impl except
|
||||
@@ -399,7 +379,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.find_at(haystack, at),
|
||||
CRLF(ref m) => m.find_at(haystack, at),
|
||||
Word(ref m) => m.find_at(haystack, at),
|
||||
}
|
||||
}
|
||||
@@ -408,7 +387,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.new_captures(),
|
||||
CRLF(ref m) => m.new_captures(),
|
||||
Word(ref m) => m.new_captures(),
|
||||
}
|
||||
}
|
||||
@@ -417,7 +395,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.capture_count(),
|
||||
CRLF(ref m) => m.capture_count(),
|
||||
Word(ref m) => m.capture_count(),
|
||||
}
|
||||
}
|
||||
@@ -426,7 +403,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.capture_index(name),
|
||||
CRLF(ref m) => m.capture_index(name),
|
||||
Word(ref m) => m.capture_index(name),
|
||||
}
|
||||
}
|
||||
@@ -435,7 +411,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.find(haystack),
|
||||
CRLF(ref m) => m.find(haystack),
|
||||
Word(ref m) => m.find(haystack),
|
||||
}
|
||||
}
|
||||
@@ -450,7 +425,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.find_iter(haystack, matched),
|
||||
CRLF(ref m) => m.find_iter(haystack, matched),
|
||||
Word(ref m) => m.find_iter(haystack, matched),
|
||||
}
|
||||
}
|
||||
@@ -465,7 +439,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.try_find_iter(haystack, matched),
|
||||
CRLF(ref m) => m.try_find_iter(haystack, matched),
|
||||
Word(ref m) => m.try_find_iter(haystack, matched),
|
||||
}
|
||||
}
|
||||
@@ -478,7 +451,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.captures(haystack, caps),
|
||||
CRLF(ref m) => m.captures(haystack, caps),
|
||||
Word(ref m) => m.captures(haystack, caps),
|
||||
}
|
||||
}
|
||||
@@ -494,7 +466,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.captures_iter(haystack, caps, matched),
|
||||
CRLF(ref m) => m.captures_iter(haystack, caps, matched),
|
||||
Word(ref m) => m.captures_iter(haystack, caps, matched),
|
||||
}
|
||||
}
|
||||
@@ -510,7 +481,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.try_captures_iter(haystack, caps, matched),
|
||||
CRLF(ref m) => m.try_captures_iter(haystack, caps, matched),
|
||||
Word(ref m) => m.try_captures_iter(haystack, caps, matched),
|
||||
}
|
||||
}
|
||||
@@ -524,7 +494,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.captures_at(haystack, at, caps),
|
||||
CRLF(ref m) => m.captures_at(haystack, at, caps),
|
||||
Word(ref m) => m.captures_at(haystack, at, caps),
|
||||
}
|
||||
}
|
||||
@@ -540,7 +509,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.replace(haystack, dst, append),
|
||||
CRLF(ref m) => m.replace(haystack, dst, append),
|
||||
Word(ref m) => m.replace(haystack, dst, append),
|
||||
}
|
||||
}
|
||||
@@ -559,9 +527,6 @@ impl Matcher for RegexMatcher {
|
||||
Standard(ref m) => {
|
||||
m.replace_with_captures(haystack, caps, dst, append)
|
||||
}
|
||||
CRLF(ref m) => {
|
||||
m.replace_with_captures(haystack, caps, dst, append)
|
||||
}
|
||||
Word(ref m) => {
|
||||
m.replace_with_captures(haystack, caps, dst, append)
|
||||
}
|
||||
@@ -572,7 +537,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.is_match(haystack),
|
||||
CRLF(ref m) => m.is_match(haystack),
|
||||
Word(ref m) => m.is_match(haystack),
|
||||
}
|
||||
}
|
||||
@@ -585,7 +549,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.is_match_at(haystack, at),
|
||||
CRLF(ref m) => m.is_match_at(haystack, at),
|
||||
Word(ref m) => m.is_match_at(haystack, at),
|
||||
}
|
||||
}
|
||||
@@ -597,7 +560,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.shortest_match(haystack),
|
||||
CRLF(ref m) => m.shortest_match(haystack),
|
||||
Word(ref m) => m.shortest_match(haystack),
|
||||
}
|
||||
}
|
||||
@@ -610,7 +572,6 @@ impl Matcher for RegexMatcher {
|
||||
use self::RegexMatcherImpl::*;
|
||||
match self.matcher {
|
||||
Standard(ref m) => m.shortest_match_at(haystack, at),
|
||||
CRLF(ref m) => m.shortest_match_at(haystack, at),
|
||||
Word(ref m) => m.shortest_match_at(haystack, at),
|
||||
}
|
||||
}
|
||||
@@ -751,9 +712,6 @@ pub struct RegexCaptures {
    /// and the capturing groups must behave as if `(re)` is the `0`th capture
    /// group.
    offset: usize,
    /// When enabled, the end of a match has `\r` stripped from it, if one
    /// exists.
    strip_crlf: bool,
}

impl Captures for RegexCaptures {

@@ -762,25 +720,8 @@ impl Captures for RegexCaptures {
    }

    fn get(&self, i: usize) -> Option<Match> {
        if !self.strip_crlf {
            let actual = i.checked_add(self.offset).unwrap();
            return self.locs.pos(actual).map(|(s, e)| Match::new(s, e));
        }

        // currently don't support capture offsetting with CRLF stripping
        assert_eq!(self.offset, 0);
        let m = match self.locs.pos(i).map(|(s, e)| Match::new(s, e)) {
            None => return None,
            Some(m) => m,
        };
        // If the end position of this match corresponds to the end position
        // of the overall match, then we apply our CRLF stripping. Otherwise,
        // we cannot assume stripping is correct.
        if i == 0 || m.end() == self.locs.pos(0).unwrap().1 {
            Some(m.with_end(m.end() - 1))
        } else {
            Some(m)
        }
        let actual = i.checked_add(self.offset).unwrap();
        self.locs.pos(actual).map(|(s, e)| Match::new(s, e))
    }
}

@@ -793,16 +734,12 @@ impl RegexCaptures {
        locs: CaptureLocations,
        offset: usize,
    ) -> RegexCaptures {
        RegexCaptures { locs, offset, strip_crlf: false }
        RegexCaptures { locs, offset }
    }

    pub(crate) fn locations(&mut self) -> &mut CaptureLocations {
        &mut self.locs
    }

    pub(crate) fn strip_crlf(&mut self, yes: bool) {
        self.strip_crlf = yes;
    }
}

#[cfg(test)]
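The rule encoded in `RegexCaptures::get` above is subtle: when CRLF stripping is active, `\r` is removed only from capture groups whose end coincides with the end of the overall match (group 0); inner groups that stop earlier are left alone because stripping them could be wrong. A standalone, hypothetical sketch of just that rule (tuples stand in for capture locations):

```rust
// Strip a trailing '\r' from a capture group only when the group ends where
// the overall match ends.
fn strip_crlf_from_group(
    haystack: &[u8],
    overall_end: usize,
    group: (usize, usize),
) -> (usize, usize) {
    let (start, end) = group;
    let ends_with_cr = end > 0 && haystack.get(end - 1) == Some(&b'\r');
    if ends_with_cr && end == overall_end {
        (start, end - 1)
    } else {
        (start, end)
    }
}

fn main() {
    let hay = b"abc\r\n";
    // Group 0 covers "abc\r": the trailing \r is stripped.
    assert_eq!(strip_crlf_from_group(hay, 4, (0, 4)), (0, 3));
    // An inner group ending at 2 ("ab") is unchanged.
    assert_eq!(strip_crlf_from_group(hay, 4, (0, 2)), (0, 2));
}
```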
@@ -55,11 +55,6 @@ impl WordMatcher {
        }
        Ok(WordMatcher { regex, names, locs })
    }

    /// Return the underlying regex used by this matcher.
    pub fn regex(&self) -> &Regex {
        &self.regex
    }
}

impl Matcher for WordMatcher {
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "grep-searcher"
|
||||
version = "0.1.3" #:version
|
||||
version = "0.1.1" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
Fast line oriented regex searching as a library.
|
||||
@@ -13,14 +13,19 @@ keywords = ["regex", "grep", "egrep", "search", "pattern"]
|
||||
license = "Unlicense/MIT"
|
||||
|
||||
[dependencies]
|
||||
bstr = { version = "0.1.2", default-features = false, features = ["std"] }
|
||||
bytecount = "0.5"
|
||||
encoding_rs = "0.8.14"
|
||||
encoding_rs_io = "0.1.6"
|
||||
encoding_rs_io = "0.1.3"
|
||||
grep-matcher = { version = "0.1.1", path = "../grep-matcher" }
|
||||
log = "0.4.5"
|
||||
memmap = "0.7"
|
||||
|
||||
[dependencies.bstr]
|
||||
version = "*"
|
||||
path = "/home/andrew/rust/bstr"
|
||||
default-features = false
|
||||
features = ["std"]
|
||||
|
||||
[dev-dependencies]
|
||||
grep-regex = { version = "0.1.1", path = "../grep-regex" }
|
||||
regex = "1.1"
|
||||
|
@@ -253,13 +253,7 @@ impl<'b, R: io::Read> LineBufferReader<'b, R> {
    }

    /// Return the contents of this buffer.
    pub fn buffer(&self) -> &[u8] {
        self.line_buffer.buffer().as_bytes()
    }

    /// Return the underlying buffer as a byte string. Used for tests only.
    #[cfg(test)]
    fn bstr(&self) -> &BStr {
    pub fn buffer(&self) -> &BStr {
        self.line_buffer.buffer()
    }
@@ -578,7 +572,7 @@ and exhibited clearly, with a label attached.\
|
||||
assert!(rdr.buffer().is_empty());
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "homer\nlisa\n");
|
||||
assert_eq!(rdr.buffer(), "homer\nlisa\n");
|
||||
assert_eq!(rdr.absolute_byte_offset(), 0);
|
||||
rdr.consume(5);
|
||||
assert_eq!(rdr.absolute_byte_offset(), 5);
|
||||
@@ -586,7 +580,7 @@ and exhibited clearly, with a label attached.\
|
||||
assert_eq!(rdr.absolute_byte_offset(), 11);
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "maggie");
|
||||
assert_eq!(rdr.buffer(), "maggie");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
@@ -601,7 +595,7 @@ and exhibited clearly, with a label attached.\
|
||||
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "homer\nlisa\nmaggie\n");
|
||||
assert_eq!(rdr.buffer(), "homer\nlisa\nmaggie\n");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
@@ -616,7 +610,7 @@ and exhibited clearly, with a label attached.\
|
||||
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "\n");
|
||||
assert_eq!(rdr.buffer(), "\n");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
@@ -631,7 +625,7 @@ and exhibited clearly, with a label attached.\
|
||||
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "\n\n");
|
||||
assert_eq!(rdr.buffer(), "\n\n");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
@@ -689,11 +683,11 @@ and exhibited clearly, with a label attached.\
|
||||
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "homer\n");
|
||||
assert_eq!(rdr.buffer(), "homer\n");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "lisa\n");
|
||||
assert_eq!(rdr.buffer(), "lisa\n");
|
||||
rdr.consume_all();
|
||||
|
||||
// This returns an error because while we have just enough room to
|
||||
@@ -703,11 +697,11 @@ and exhibited clearly, with a label attached.\
|
||||
assert!(rdr.fill().is_err());
|
||||
|
||||
// We can mush on though!
|
||||
assert_eq!(rdr.bstr(), "m");
|
||||
assert_eq!(rdr.buffer(), "m");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "aggie");
|
||||
assert_eq!(rdr.buffer(), "aggie");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
@@ -723,16 +717,16 @@ and exhibited clearly, with a label attached.\
|
||||
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "homer\n");
|
||||
assert_eq!(rdr.buffer(), "homer\n");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "lisa\n");
|
||||
assert_eq!(rdr.buffer(), "lisa\n");
|
||||
rdr.consume_all();
|
||||
|
||||
// We have just enough space.
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "maggie");
|
||||
assert_eq!(rdr.buffer(), "maggie");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
@@ -748,7 +742,7 @@ and exhibited clearly, with a label attached.\
|
||||
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
|
||||
|
||||
assert!(rdr.fill().is_err());
|
||||
assert_eq!(rdr.bstr(), "");
|
||||
assert_eq!(rdr.buffer(), "");
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -760,7 +754,7 @@ and exhibited clearly, with a label attached.\
|
||||
assert!(rdr.buffer().is_empty());
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "homer\nli\x00sa\nmaggie\n");
|
||||
assert_eq!(rdr.buffer(), "homer\nli\x00sa\nmaggie\n");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
@@ -779,7 +773,7 @@ and exhibited clearly, with a label attached.\
|
||||
assert!(rdr.buffer().is_empty());
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "homer\nli");
|
||||
assert_eq!(rdr.buffer(), "homer\nli");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
@@ -796,7 +790,7 @@ and exhibited clearly, with a label attached.\
|
||||
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "");
|
||||
assert_eq!(rdr.buffer(), "");
|
||||
assert_eq!(rdr.absolute_byte_offset(), 0);
|
||||
assert_eq!(rdr.binary_byte_offset(), Some(0));
|
||||
}
|
||||
@@ -812,7 +806,7 @@ and exhibited clearly, with a label attached.\
|
||||
assert!(rdr.buffer().is_empty());
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "homer\nlisa\nmaggie\n");
|
||||
assert_eq!(rdr.buffer(), "homer\nlisa\nmaggie\n");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
@@ -831,7 +825,7 @@ and exhibited clearly, with a label attached.\
|
||||
assert!(rdr.buffer().is_empty());
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "homer\nlisa\nmaggie");
|
||||
assert_eq!(rdr.buffer(), "homer\nlisa\nmaggie");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
@@ -849,7 +843,7 @@ and exhibited clearly, with a label attached.\
|
||||
assert!(rdr.buffer().is_empty());
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "\
|
||||
assert_eq!(rdr.buffer(), "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
Holmeses, s\
|
||||
");
|
||||
@@ -872,7 +866,7 @@ Holmeses, s\
|
||||
assert!(rdr.buffer().is_empty());
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "homer\nli\nsa\nmaggie\n");
|
||||
assert_eq!(rdr.buffer(), "homer\nli\nsa\nmaggie\n");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
@@ -891,7 +885,7 @@ Holmeses, s\
|
||||
assert!(rdr.buffer().is_empty());
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "\nhomer\nlisa\nmaggie\n");
|
||||
assert_eq!(rdr.buffer(), "\nhomer\nlisa\nmaggie\n");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
@@ -910,7 +904,7 @@ Holmeses, s\
|
||||
assert!(rdr.buffer().is_empty());
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "homer\nlisa\nmaggie\n\n");
|
||||
assert_eq!(rdr.buffer(), "homer\nlisa\nmaggie\n\n");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
@@ -929,7 +923,7 @@ Holmeses, s\
|
||||
assert!(rdr.buffer().is_empty());
|
||||
|
||||
assert!(rdr.fill().unwrap());
|
||||
assert_eq!(rdr.bstr(), "homer\nlisa\nmaggie\n\n");
|
||||
assert_eq!(rdr.buffer(), "homer\nlisa\nmaggie\n\n");
|
||||
rdr.consume_all();
|
||||
|
||||
assert!(!rdr.fill().unwrap());
|
||||
|
@@ -2,7 +2,7 @@
|
||||
A collection of routines for performing operations on lines.
|
||||
*/
|
||||
|
||||
use bstr::B;
|
||||
use bstr::{B, BStr};
|
||||
use bytecount;
|
||||
use grep_matcher::{LineTerminator, Match};
|
||||
|
||||
@@ -14,7 +14,7 @@ use grep_matcher::{LineTerminator, Match};
|
||||
/// `'b` refers to the lifetime of the underlying bytes.
|
||||
#[derive(Debug)]
|
||||
pub struct LineIter<'b> {
|
||||
bytes: &'b [u8],
|
||||
bytes: &'b BStr,
|
||||
stepper: LineStep,
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ impl<'b> LineIter<'b> {
|
||||
/// are terminated by `line_term`.
|
||||
pub fn new(line_term: u8, bytes: &'b [u8]) -> LineIter<'b> {
|
||||
LineIter {
|
||||
bytes: bytes,
|
||||
bytes: B(bytes),
|
||||
stepper: LineStep::new(line_term, 0, bytes.len()),
|
||||
}
|
||||
}
|
||||
@@ -33,7 +33,7 @@ impl<'b> Iterator for LineIter<'b> {
|
||||
type Item = &'b [u8];
|
||||
|
||||
fn next(&mut self) -> Option<&'b [u8]> {
|
||||
self.stepper.next_match(self.bytes).map(|m| &self.bytes[m])
|
||||
self.stepper.next_match(self.bytes).map(|m| self.bytes[m].as_bytes())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,19 +73,19 @@ impl LineStep {
|
||||
/// The range returned includes the line terminator. Ranges are always
|
||||
/// non-empty.
|
||||
pub fn next(&mut self, bytes: &[u8]) -> Option<(usize, usize)> {
|
||||
self.next_impl(bytes)
|
||||
self.next_impl(B(bytes))
|
||||
}
|
||||
|
||||
/// Like next, but returns a `Match` instead of a tuple.
|
||||
#[inline(always)]
|
||||
pub(crate) fn next_match(&mut self, bytes: &[u8]) -> Option<Match> {
|
||||
pub(crate) fn next_match(&mut self, bytes: &BStr) -> Option<Match> {
|
||||
self.next_impl(bytes).map(|(s, e)| Match::new(s, e))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn next_impl(&mut self, mut bytes: &[u8]) -> Option<(usize, usize)> {
|
||||
fn next_impl(&mut self, mut bytes: &BStr) -> Option<(usize, usize)> {
|
||||
bytes = &bytes[..self.end];
|
||||
match B(&bytes[self.pos..]).find_byte(self.line_term) {
|
||||
match bytes[self.pos..].find_byte(self.line_term) {
|
||||
None => {
|
||||
if self.pos < bytes.len() {
|
||||
let m = (self.pos, bytes.len());
|
||||
@@ -109,15 +109,15 @@ impl LineStep {
}

/// Count the number of occurrences of `line_term` in `bytes`.
pub fn count(bytes: &[u8], line_term: u8) -> u64 {
    bytecount::count(bytes, line_term) as u64
pub fn count(bytes: &BStr, line_term: u8) -> u64 {
    bytecount::count(bytes.as_bytes(), line_term) as u64
}

/// Given a line that possibly ends with a terminator, return that line without
/// the terminator.
#[inline(always)]
pub fn without_terminator(bytes: &[u8], line_term: LineTerminator) -> &[u8] {
    let line_term = line_term.as_bytes();
pub fn without_terminator(bytes: &BStr, line_term: LineTerminator) -> &BStr {
    let line_term = BStr::new(line_term.as_bytes());
    let start = bytes.len().saturating_sub(line_term.len());
    if bytes.get(start..) == Some(line_term) {
        return &bytes[..bytes.len() - line_term.len()];
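The shape of `without_terminator` is easy to see in a std-only sketch: drop a trailing terminator only if the slice actually ends with one. The real function takes grep-matcher's `LineTerminator`; here the terminator is just a byte-string parameter.

```rust
// Drop a trailing line terminator ("\n" or "\r\n") if, and only if, the slice
// ends with it.
fn without_terminator<'a>(bytes: &'a [u8], term: &[u8]) -> &'a [u8] {
    if bytes.ends_with(term) {
        &bytes[..bytes.len() - term.len()]
    } else {
        bytes
    }
}

fn main() {
    assert_eq!(without_terminator(b"hello\n", b"\n"), b"hello");
    assert_eq!(without_terminator(b"hello\r\n", b"\r\n"), b"hello");
    assert_eq!(without_terminator(b"hello", b"\n"), b"hello");
}
```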
@@ -131,18 +131,18 @@ pub fn without_terminator(bytes: &[u8], line_term: LineTerminator) -> &[u8] {
/// Line terminators are considered part of the line they terminate.
#[inline(always)]
pub fn locate(
    bytes: &[u8],
    bytes: &BStr,
    line_term: u8,
    range: Match,
) -> Match {
    let line_start = B(&bytes[..range.start()])
    let line_start = bytes[..range.start()]
        .rfind_byte(line_term)
        .map_or(0, |i| i + 1);
    let line_end =
        if range.end() > line_start && bytes[range.end() - 1] == line_term {
            range.end()
        } else {
            B(&bytes[range.end()..])
            bytes[range.end()..]
                .find_byte(line_term)
                .map_or(bytes.len(), |i| range.end() + i + 1)
        };
@@ -157,7 +157,7 @@ pub fn locate(

///
/// If `bytes` ends with a line terminator, then the terminator itself is
/// considered part of the last line.
pub fn preceding(bytes: &[u8], line_term: u8, count: usize) -> usize {
pub fn preceding(bytes: &BStr, line_term: u8, count: usize) -> usize {
    preceding_by_pos(bytes, bytes.len(), line_term, count)
}

@@ -171,7 +171,7 @@ pub fn preceding(bytes: &[u8], line_term: u8, count: usize) -> usize {
/// and `pos = 7`, `preceding(bytes, pos, b'\n', 0)` returns `4` (as does `pos
/// = 8`) and `preceding(bytes, pos, `b'\n', 1)` returns `0`.
fn preceding_by_pos(
    bytes: &[u8],
    bytes: &BStr,
    mut pos: usize,
    line_term: u8,
    mut count: usize,

@@ -182,7 +182,7 @@ fn preceding_by_pos(
        pos -= 1;
    }
    loop {
        match B(&bytes[..pos]).rfind_byte(line_term) {
        match bytes[..pos].rfind_byte(line_term) {
            None => {
                return 0;
            }
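A std-only sketch that reproduces the documented example for `preceding_by_pos` (bytes = `"abc\nxyz\n"`): walk backwards over line terminators until the requested number of preceding lines has been skipped. This is a simplified stand-in, not the crate's implementation; the crate's own test later in this diff exercises the same three cases.

```rust
// Find the starting offset of the line `count` lines before `pos`.
fn preceding_by_pos(bytes: &[u8], mut pos: usize, line_term: u8, mut count: usize) -> usize {
    if pos == 0 {
        return 0;
    } else if bytes[pos - 1] == line_term {
        // A trailing terminator belongs to the line it terminates.
        pos -= 1;
    }
    loop {
        match bytes[..pos].iter().rposition(|&b| b == line_term) {
            None => return 0,
            Some(i) => {
                if count == 0 {
                    return i + 1;
                }
                count -= 1;
                pos = i;
            }
        }
    }
}

fn main() {
    let bytes = b"abc\nxyz\n";
    assert_eq!(4, preceding_by_pos(bytes, 7, b'\n', 0));
    assert_eq!(4, preceding_by_pos(bytes, 8, b'\n', 0));
    assert_eq!(0, preceding_by_pos(bytes, 7, b'\n', 1));
}
```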
@@ -203,7 +203,10 @@ fn preceding_by_pos(
|
||||
mod tests {
|
||||
use std::ops::Range;
|
||||
use std::str;
|
||||
|
||||
use bstr::B;
|
||||
use grep_matcher::Match;
|
||||
|
||||
use super::*;
|
||||
|
||||
const SHERLOCK: &'static str = "\
|
||||
@@ -222,7 +225,7 @@ and exhibited clearly, with a label attached.\
|
||||
fn lines(text: &str) -> Vec<&str> {
|
||||
let mut results = vec![];
|
||||
let mut it = LineStep::new(b'\n', 0, text.len());
|
||||
while let Some(m) = it.next_match(text.as_bytes()) {
|
||||
while let Some(m) = it.next_match(B(text)) {
|
||||
results.push(&text[m]);
|
||||
}
|
||||
results
|
||||
@@ -231,26 +234,26 @@ and exhibited clearly, with a label attached.\
|
||||
fn line_ranges(text: &str) -> Vec<Range<usize>> {
|
||||
let mut results = vec![];
|
||||
let mut it = LineStep::new(b'\n', 0, text.len());
|
||||
while let Some(m) = it.next_match(text.as_bytes()) {
|
||||
while let Some(m) = it.next_match(B(text)) {
|
||||
results.push(m.start()..m.end());
|
||||
}
|
||||
results
|
||||
}
|
||||
|
||||
fn prev(text: &str, pos: usize, count: usize) -> usize {
|
||||
preceding_by_pos(text.as_bytes(), pos, b'\n', count)
|
||||
preceding_by_pos(B(text), pos, b'\n', count)
|
||||
}
|
||||
|
||||
fn loc(text: &str, start: usize, end: usize) -> Match {
|
||||
locate(text.as_bytes(), b'\n', Match::new(start, end))
|
||||
locate(B(text), b'\n', Match::new(start, end))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn line_count() {
|
||||
assert_eq!(0, count(b"", b'\n'));
|
||||
assert_eq!(1, count(b"\n", b'\n'));
|
||||
assert_eq!(2, count(b"\n\n", b'\n'));
|
||||
assert_eq!(2, count(b"a\nb\nc", b'\n'));
|
||||
assert_eq!(0, count(B(""), b'\n'));
|
||||
assert_eq!(1, count(B("\n"), b'\n'));
|
||||
assert_eq!(2, count(B("\n\n"), b'\n'));
|
||||
assert_eq!(2, count(B("a\nb\nc"), b'\n'));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -333,7 +336,7 @@ and exhibited clearly, with a label attached.\
    #[test]
    fn preceding_lines_doc() {
        // These are the examples mentioned in the documentation of `preceding`.
        let bytes = b"abc\nxyz\n";
        let bytes = B("abc\nxyz\n");
        assert_eq!(4, preceding_by_pos(bytes, 7, b'\n', 0));
        assert_eq!(4, preceding_by_pos(bytes, 8, b'\n', 0));
        assert_eq!(0, preceding_by_pos(bytes, 7, b'\n', 1));
@@ -1,6 +1,6 @@
|
||||
use std::cmp;
|
||||
|
||||
use bstr::B;
|
||||
use bstr::BStr;
|
||||
|
||||
use grep_matcher::{LineMatchKind, Matcher};
|
||||
use lines::{self, LineStep};
|
||||
@@ -84,7 +84,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
|
||||
pub fn matched(
|
||||
&mut self,
|
||||
buf: &[u8],
|
||||
buf: &BStr,
|
||||
range: &Range,
|
||||
) -> Result<bool, S::Error> {
|
||||
self.sink_matched(buf, range)
|
||||
@@ -107,7 +107,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn match_by_line(&mut self, buf: &[u8]) -> Result<bool, S::Error> {
|
||||
pub fn match_by_line(&mut self, buf: &BStr) -> Result<bool, S::Error> {
|
||||
if self.is_line_by_line_fast() {
|
||||
self.match_by_line_fast(buf)
|
||||
} else {
|
||||
@@ -115,7 +115,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn roll(&mut self, buf: &[u8]) -> usize {
|
||||
pub fn roll(&mut self, buf: &BStr) -> usize {
|
||||
let consumed =
|
||||
if self.config.max_context() == 0 {
|
||||
buf.len()
|
||||
@@ -141,7 +141,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
consumed
|
||||
}
|
||||
|
||||
pub fn detect_binary(&mut self, buf: &[u8], range: &Range) -> bool {
|
||||
pub fn detect_binary(&mut self, buf: &BStr, range: &Range) -> bool {
|
||||
if self.binary_byte_offset.is_some() {
|
||||
return true;
|
||||
}
|
||||
@@ -149,7 +149,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
BinaryDetection::Quit(b) => b,
|
||||
_ => return false,
|
||||
};
|
||||
if let Some(i) = B(&buf[*range]).find_byte(binary_byte) {
|
||||
if let Some(i) = buf[*range].find_byte(binary_byte) {
|
||||
self.binary_byte_offset = Some(range.start() + i);
|
||||
true
|
||||
} else {
|
||||
@@ -159,7 +159,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
|
||||
pub fn before_context_by_line(
|
||||
&mut self,
|
||||
buf: &[u8],
|
||||
buf: &BStr,
|
||||
upto: usize,
|
||||
) -> Result<bool, S::Error> {
|
||||
if self.config.before_context == 0 {
|
||||
@@ -194,7 +194,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
|
||||
pub fn after_context_by_line(
|
||||
&mut self,
|
||||
buf: &[u8],
|
||||
buf: &BStr,
|
||||
upto: usize,
|
||||
) -> Result<bool, S::Error> {
|
||||
if self.after_context_left == 0 {
|
||||
@@ -219,7 +219,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
|
||||
pub fn other_context_by_line(
|
||||
&mut self,
|
||||
buf: &[u8],
|
||||
buf: &BStr,
|
||||
upto: usize,
|
||||
) -> Result<bool, S::Error> {
|
||||
let range = Range::new(self.last_line_visited, upto);
|
||||
@@ -236,7 +236,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn match_by_line_slow(&mut self, buf: &[u8]) -> Result<bool, S::Error> {
|
||||
fn match_by_line_slow(&mut self, buf: &BStr) -> Result<bool, S::Error> {
|
||||
debug_assert!(!self.searcher.multi_line_with_matcher(&self.matcher));
|
||||
|
||||
let range = Range::new(self.pos(), buf.len());
|
||||
@@ -255,7 +255,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
&buf[line],
|
||||
self.config.line_term,
|
||||
);
|
||||
match self.matcher.shortest_match(slice) {
|
||||
match self.matcher.shortest_match(slice.as_bytes()) {
|
||||
Err(err) => return Err(S::Error::error_message(err)),
|
||||
Ok(result) => result.is_some(),
|
||||
}
|
||||
@@ -281,7 +281,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn match_by_line_fast(&mut self, buf: &[u8]) -> Result<bool, S::Error> {
|
||||
fn match_by_line_fast(&mut self, buf: &BStr) -> Result<bool, S::Error> {
|
||||
debug_assert!(!self.config.passthru);
|
||||
|
||||
while !buf[self.pos()..].is_empty() {
|
||||
@@ -316,7 +316,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
#[inline(always)]
|
||||
fn match_by_line_fast_invert(
|
||||
&mut self,
|
||||
buf: &[u8],
|
||||
buf: &BStr,
|
||||
) -> Result<bool, S::Error> {
|
||||
assert!(self.config.invert_match);
|
||||
|
||||
@@ -357,14 +357,14 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
#[inline(always)]
|
||||
fn find_by_line_fast(
|
||||
&self,
|
||||
buf: &[u8],
|
||||
buf: &BStr,
|
||||
) -> Result<Option<Range>, S::Error> {
|
||||
debug_assert!(!self.searcher.multi_line_with_matcher(&self.matcher));
|
||||
debug_assert!(self.is_line_by_line_fast());
|
||||
|
||||
let mut pos = self.pos();
|
||||
while !buf[pos..].is_empty() {
|
||||
match self.matcher.find_candidate_line(&buf[pos..]) {
|
||||
match self.matcher.find_candidate_line(buf[pos..].as_bytes()) {
|
||||
Err(err) => return Err(S::Error::error_message(err)),
|
||||
Ok(None) => return Ok(None),
|
||||
Ok(Some(LineMatchKind::Confirmed(i))) => {
|
||||
@@ -396,7 +396,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
&buf[line],
|
||||
self.config.line_term,
|
||||
);
|
||||
match self.matcher.is_match(slice) {
|
||||
match self.matcher.is_match(slice.as_bytes()) {
|
||||
Err(err) => return Err(S::Error::error_message(err)),
|
||||
Ok(true) => return Ok(Some(line)),
|
||||
Ok(false) => {
|
||||
@@ -413,7 +413,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
#[inline(always)]
|
||||
fn sink_matched(
|
||||
&mut self,
|
||||
buf: &[u8],
|
||||
buf: &BStr,
|
||||
range: &Range,
|
||||
) -> Result<bool, S::Error> {
|
||||
if self.binary && self.detect_binary(buf, range) {
|
||||
@@ -424,12 +424,21 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
        }
        self.count_lines(buf, range.start());
        let offset = self.absolute_byte_offset + range.start() as u64;
        let linebuf = &buf[*range];
        let linebuf =
            if self.config.line_term.is_crlf() {
                // Normally, a line terminator is never part of a match, but
                // if the line terminator is CRLF, then it's possible for `\r`
                // to end up in the match, which we generally don't want. So
                // we strip it here.
                lines::without_terminator(&buf[*range], self.config.line_term)
            } else {
                &buf[*range]
            };
        let keepgoing = self.sink.matched(
            &self.searcher,
            &SinkMatch {
                line_term: self.config.line_term,
                bytes: linebuf,
                bytes: linebuf.as_bytes(),
                absolute_byte_offset: offset,
                line_number: self.line_number,
            },
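The idea in the hunk above: when the configured line terminator is CRLF, a matched line can end up carrying a stray `\r`, so the searcher cleans it off before handing `SinkMatch::bytes` to the sink. A std-only, simplified sketch of that cleanup (the real code delegates to `lines::without_terminator` with a `LineTerminator` value):

```rust
// Before reporting a matched line, drop a trailing '\r' when running in CRLF
// mode so sinks never see a stray carriage return.
fn clean_matched_line(line: &[u8], crlf_mode: bool) -> &[u8] {
    if crlf_mode && line.ends_with(b"\r") {
        &line[..line.len() - 1]
    } else {
        line
    }
}

fn main() {
    assert_eq!(clean_matched_line(b"needle\r", true), b"needle");
    assert_eq!(clean_matched_line(b"needle", true), b"needle");
    // With a plain "\n" terminator nothing is stripped.
    assert_eq!(clean_matched_line(b"needle\r", false), b"needle\r");
}
```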
@@ -445,7 +454,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
|
||||
fn sink_before_context(
|
||||
&mut self,
|
||||
buf: &[u8],
|
||||
buf: &BStr,
|
||||
range: &Range,
|
||||
) -> Result<bool, S::Error> {
|
||||
if self.binary && self.detect_binary(buf, range) {
|
||||
@@ -457,7 +466,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
&self.searcher,
|
||||
&SinkContext {
|
||||
line_term: self.config.line_term,
|
||||
bytes: &buf[*range],
|
||||
bytes: buf[*range].as_bytes(),
|
||||
kind: SinkContextKind::Before,
|
||||
absolute_byte_offset: offset,
|
||||
line_number: self.line_number,
|
||||
@@ -473,7 +482,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
|
||||
fn sink_after_context(
|
||||
&mut self,
|
||||
buf: &[u8],
|
||||
buf: &BStr,
|
||||
range: &Range,
|
||||
) -> Result<bool, S::Error> {
|
||||
assert!(self.after_context_left >= 1);
|
||||
@@ -487,7 +496,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
&self.searcher,
|
||||
&SinkContext {
|
||||
line_term: self.config.line_term,
|
||||
bytes: &buf[*range],
|
||||
bytes: buf[*range].as_bytes(),
|
||||
kind: SinkContextKind::After,
|
||||
absolute_byte_offset: offset,
|
||||
line_number: self.line_number,
|
||||
@@ -504,7 +513,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
|
||||
fn sink_other_context(
|
||||
&mut self,
|
||||
buf: &[u8],
|
||||
buf: &BStr,
|
||||
range: &Range,
|
||||
) -> Result<bool, S::Error> {
|
||||
if self.binary && self.detect_binary(buf, range) {
|
||||
@@ -516,7 +525,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
&self.searcher,
|
||||
&SinkContext {
|
||||
line_term: self.config.line_term,
|
||||
bytes: &buf[*range],
|
||||
bytes: buf[*range].as_bytes(),
|
||||
kind: SinkContextKind::Other,
|
||||
absolute_byte_offset: offset,
|
||||
line_number: self.line_number,
|
||||
@@ -546,7 +555,7 @@ impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
|
||||
}
|
||||
}
|
||||
|
||||
fn count_lines(&mut self, buf: &[u8], upto: usize) {
|
||||
fn count_lines(&mut self, buf: &BStr, upto: usize) {
|
||||
if let Some(ref mut line_number) = self.line_number {
|
||||
if self.last_line_counted >= upto {
|
||||
return;
|
||||
|
@@ -1,7 +1,9 @@
|
||||
use std::cmp;
|
||||
use std::io;
|
||||
|
||||
use bstr::BStr;
|
||||
use grep_matcher::Matcher;
|
||||
|
||||
use lines::{self, LineStep};
|
||||
use line_buffer::{DEFAULT_BUFFER_CAPACITY, LineBufferReader};
|
||||
use sink::{Sink, SinkError};
|
||||
@@ -77,14 +79,14 @@ where M: Matcher,
|
||||
pub struct SliceByLine<'s, M: 's, S> {
|
||||
config: &'s Config,
|
||||
core: Core<'s, M, S>,
|
||||
slice: &'s [u8],
|
||||
slice: &'s BStr,
|
||||
}
|
||||
|
||||
impl<'s, M: Matcher, S: Sink> SliceByLine<'s, M, S> {
|
||||
pub fn new(
|
||||
searcher: &'s Searcher,
|
||||
matcher: M,
|
||||
slice: &'s [u8],
|
||||
slice: &'s BStr,
|
||||
write_to: S,
|
||||
) -> SliceByLine<'s, M, S> {
|
||||
debug_assert!(!searcher.multi_line_with_matcher(&matcher));
|
||||
@@ -127,7 +129,7 @@ impl<'s, M: Matcher, S: Sink> SliceByLine<'s, M, S> {
|
||||
pub struct MultiLine<'s, M: 's, S> {
|
||||
config: &'s Config,
|
||||
core: Core<'s, M, S>,
|
||||
slice: &'s [u8],
|
||||
slice: &'s BStr,
|
||||
last_match: Option<Range>,
|
||||
}
|
||||
|
||||
@@ -135,7 +137,7 @@ impl<'s, M: Matcher, S: Sink> MultiLine<'s, M, S> {
|
||||
pub fn new(
|
||||
searcher: &'s Searcher,
|
||||
matcher: M,
|
||||
slice: &'s [u8],
|
||||
slice: &'s BStr,
|
||||
write_to: S,
|
||||
) -> MultiLine<'s, M, S> {
|
||||
debug_assert!(searcher.multi_line_with_matcher(&matcher));
|
||||
@@ -306,7 +308,8 @@ impl<'s, M: Matcher, S: Sink> MultiLine<'s, M, S> {
|
||||
}
|
||||
|
||||
fn find(&mut self) -> Result<Option<Range>, S::Error> {
|
||||
match self.core.matcher().find(&self.slice[self.core.pos()..]) {
|
||||
let haystack = &self.slice[self.core.pos()..];
|
||||
match self.core.matcher().find(haystack.as_bytes()) {
|
||||
Err(err) => Err(S::Error::error_message(err)),
|
||||
Ok(None) => Ok(None),
|
||||
Ok(Some(m)) => Ok(Some(m.offset(self.core.pos()))),
|
||||
|
@@ -5,6 +5,7 @@ use std::fs::File;
|
||||
use std::io::{self, Read};
|
||||
use std::path::Path;
|
||||
|
||||
use bstr::{B, BStr, BString};
|
||||
use encoding_rs;
|
||||
use encoding_rs_io::DecodeReaderBytesBuilder;
|
||||
use grep_matcher::{LineTerminator, Match, Matcher};
|
||||
@@ -155,8 +156,6 @@ pub struct Config {
|
||||
/// An encoding that, when present, causes the searcher to transcode all
|
||||
/// input from the encoding to UTF-8.
|
||||
encoding: Option<Encoding>,
|
||||
/// Whether to do automatic transcoding based on a BOM or not.
|
||||
bom_sniffing: bool,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
@@ -173,7 +172,6 @@ impl Default for Config {
|
||||
binary: BinaryDetection::default(),
|
||||
multi_line: false,
|
||||
encoding: None,
|
||||
bom_sniffing: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -306,21 +304,17 @@ impl SearcherBuilder {
|
||||
config.before_context = 0;
|
||||
config.after_context = 0;
|
||||
}
|
||||
|
||||
let mut decode_builder = DecodeReaderBytesBuilder::new();
|
||||
decode_builder
|
||||
.encoding(self.config.encoding.as_ref().map(|e| e.0))
|
||||
.utf8_passthru(true)
|
||||
.strip_bom(self.config.bom_sniffing)
|
||||
.bom_override(true)
|
||||
.bom_sniffing(self.config.bom_sniffing);
|
||||
|
||||
.bom_override(true);
|
||||
Searcher {
|
||||
config: config,
|
||||
decode_builder: decode_builder,
|
||||
decode_buffer: RefCell::new(vec![0; 8 * (1<<10)]),
|
||||
decode_buffer: RefCell::new(BString::from(vec![0; 8 * (1<<10)])),
|
||||
line_buffer: RefCell::new(self.config.line_buffer()),
|
||||
multi_line_buffer: RefCell::new(vec![]),
|
||||
multi_line_buffer: RefCell::new(BString::new()),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -512,13 +506,12 @@ impl SearcherBuilder {
    /// transcoding process encounters an error, then bytes are replaced with
    /// the Unicode replacement codepoint.
    ///
    /// When no encoding is specified (the default), then BOM sniffing is
    /// used (if it's enabled, which it is, by default) to determine whether
    /// the source data is UTF-8 or UTF-16, and transcoding will be performed
    /// automatically. If no BOM could be found, then the source data is
    /// searched _as if_ it were UTF-8. However, so long as the source data is
    /// at least ASCII compatible, then it is possible for a search to produce
    /// useful results.
    /// When no encoding is specified (the default), then BOM sniffing is used
    /// to determine whether the source data is UTF-8 or UTF-16, and
    /// transcoding will be performed automatically. If no BOM could be found,
    /// then the source data is searched _as if_ it were UTF-8. However, so
    /// long as the source data is at least ASCII compatible, then it is
    /// possible for a search to produce useful results.
    pub fn encoding(
        &mut self,
        encoding: Option<Encoding>,

@@ -526,23 +519,6 @@ impl SearcherBuilder {
        self.config.encoding = encoding;
        self
    }

    /// Enable automatic transcoding based on BOM sniffing.
    ///
    /// When this is enabled and an explicit encoding is not set, then this
    /// searcher will try to detect the encoding of the bytes being searched
    /// by sniffing its byte-order mark (BOM). In particular, when this is
    /// enabled, UTF-16 encoded files will be searched seamlessly.
    ///
    /// When this is disabled and if an explicit encoding is not set, then
    /// the bytes from the source stream will be passed through unchanged,
    /// including its BOM, if one is present.
    ///
    /// This is enabled by default.
    pub fn bom_sniffing(&mut self, yes: bool) -> &mut SearcherBuilder {
        self.config.bom_sniffing = yes;
        self
    }
}

/// A searcher executes searches over a haystack and writes results to a caller
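For the side of this comparison that has the `bom_sniffing` knob, a usage sketch looks roughly like the following. It assumes the grep-searcher and grep-regex crates from this tree (the builder methods shown are the ones visible in the hunk above); the pattern, haystack, and sink are arbitrary.

```rust
extern crate grep_regex;
extern crate grep_searcher;

use std::error::Error;

use grep_regex::RegexMatcher;
use grep_searcher::SearcherBuilder;
use grep_searcher::sinks::UTF8;

fn main() -> Result<(), Box<dyn Error>> {
    let matcher = RegexMatcher::new(r"Watson")?;
    let mut searcher = SearcherBuilder::new()
        .bom_sniffing(false) // pass bytes through; no UTF-16 auto-detection
        .line_number(true)
        .build();
    searcher.search_slice(
        &matcher,
        b"For the Doctor Watsons of this world\n",
        UTF8(|lnum, line| {
            print!("{}: {}", lnum, line);
            Ok(true)
        }),
    )?;
    Ok(())
}
```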
@@ -568,7 +544,7 @@ pub struct Searcher {
|
||||
/// through the underlying bytes with no additional overhead.
|
||||
decode_builder: DecodeReaderBytesBuilder,
|
||||
/// A buffer that is used for transcoding scratch space.
|
||||
decode_buffer: RefCell<Vec<u8>>,
|
||||
decode_buffer: RefCell<BString>,
|
||||
/// A line buffer for use in line oriented searching.
|
||||
///
|
||||
/// We wrap it in a RefCell to permit lending out borrows of `Searcher`
|
||||
@@ -580,7 +556,7 @@ pub struct Searcher {
|
||||
/// multi line search. In particular, multi line searches cannot be
|
||||
/// performed incrementally, and need the entire haystack in memory at
|
||||
/// once.
|
||||
multi_line_buffer: RefCell<Vec<u8>>,
|
||||
multi_line_buffer: RefCell<BString>,
|
||||
}
|
||||
|
||||
impl Searcher {
|
||||
@@ -691,7 +667,7 @@ impl Searcher {
|
||||
|
||||
let mut decode_buffer = self.decode_buffer.borrow_mut();
|
||||
let read_from = self.decode_builder
|
||||
.build_with_buffer(read_from, &mut *decode_buffer)
|
||||
.build_with_buffer(read_from, decode_buffer.as_mut_vec())
|
||||
.map_err(S::Error::error_io)?;
|
||||
|
||||
if self.multi_line_with_matcher(&matcher) {
|
||||
@@ -723,12 +699,13 @@ impl Searcher {
|
||||
where M: Matcher,
|
||||
S: Sink,
|
||||
{
|
||||
let slice = B(slice);
|
||||
self.check_config(&matcher).map_err(S::Error::error_config)?;
|
||||
|
||||
// We can search the slice directly, unless we need to do transcoding.
|
||||
if self.slice_needs_transcoding(slice) {
|
||||
trace!("slice reader: needs transcoding, using generic reader");
|
||||
return self.search_reader(matcher, slice, write_to);
|
||||
return self.search_reader(matcher, slice.as_bytes(), write_to);
|
||||
}
|
||||
if self.multi_line_with_matcher(&matcher) {
|
||||
trace!("slice reader: searching via multiline strategy");
|
||||
@@ -761,9 +738,8 @@ impl Searcher {
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given slice needs to be transcoded.
|
||||
fn slice_needs_transcoding(&self, slice: &[u8]) -> bool {
|
||||
self.config.encoding.is_some()
|
||||
|| (self.config.bom_sniffing && slice_has_utf16_bom(slice))
|
||||
fn slice_needs_transcoding(&self, slice: &BStr) -> bool {
|
||||
self.config.encoding.is_some() || slice_has_utf16_bom(slice)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -877,7 +853,9 @@ impl Searcher {
|
||||
.map(|m| m.len() as usize + 1)
|
||||
.unwrap_or(0);
|
||||
buf.reserve(cap);
|
||||
read_from.read_to_end(&mut *buf).map_err(S::Error::error_io)?;
|
||||
read_from
|
||||
.read_to_end(buf.as_mut_vec())
|
||||
.map_err(S::Error::error_io)?;
|
||||
return Ok(());
|
||||
}
|
||||
self.fill_multi_line_buffer_from_reader::<_, S>(read_from)
|
||||
@@ -894,6 +872,7 @@ impl Searcher {
|
||||
assert!(self.config.multi_line);
|
||||
|
||||
let mut buf = self.multi_line_buffer.borrow_mut();
|
||||
let buf = buf.as_mut_vec();
|
||||
buf.clear();
|
||||
|
||||
// If we don't have a heap limit, then we can defer to std's
|
||||
@@ -945,8 +924,8 @@ impl Searcher {
///
/// This is used by the searcher to determine if a transcoder is necessary.
/// Otherwise, it is advantageous to search the slice directly.
fn slice_has_utf16_bom(slice: &[u8]) -> bool {
    let enc = match encoding_rs::Encoding::for_bom(slice) {
fn slice_has_utf16_bom(slice: &BStr) -> bool {
    let enc = match encoding_rs::Encoding::for_bom(slice.as_bytes()) {
        None => return false,
        Some((enc, _)) => enc,
    };
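What this helper checks, in plain terms: a slice "has a UTF-16 BOM" when its first two bytes are FF FE (little endian) or FE FF (big endian). The real code asks `encoding_rs::Encoding::for_bom` and then compares the result against the UTF-16 encodings; a std-only sketch of the same check:

```rust
// A slice has a UTF-16 BOM if it starts with FF FE (LE) or FE FF (BE).
fn slice_has_utf16_bom(slice: &[u8]) -> bool {
    slice.starts_with(&[0xFF, 0xFE]) || slice.starts_with(&[0xFE, 0xFF])
}

fn main() {
    assert!(slice_has_utf16_bom(&[0xFF, 0xFE, b'h', 0x00]));
    assert!(slice_has_utf16_bom(&[0xFE, 0xFF, 0x00, b'h']));
    assert!(!slice_has_utf16_bom(b"plain ASCII"));
    // A UTF-8 BOM (EF BB BF) is *not* a UTF-16 BOM.
    assert!(!slice_has_utf16_bom(&[0xEF, 0xBB, 0xBF, b'h']));
}
```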
@@ -96,7 +96,6 @@ impl Matcher for RegexMatcher {
|
||||
// line.
|
||||
let i = B(haystack)
|
||||
.find_byte(self.line_term.unwrap().as_byte())
|
||||
.map(|i| i)
|
||||
.unwrap_or(haystack.len() - 1);
|
||||
Ok(Some(LineMatchKind::Candidate(i)))
|
||||
} else {
|
||||
|
@@ -21,9 +21,16 @@ grep-regex = { version = "0.1.1", path = "../grep-regex" }
|
||||
grep-searcher = { version = "0.1.1", path = "../grep-searcher" }
|
||||
|
||||
[dev-dependencies]
|
||||
atty = "0.2.11"
|
||||
regex = "1.1"
|
||||
termcolor = "1.0.4"
|
||||
walkdir = "2.2.7"
|
||||
|
||||
[dev-dependencies.clap]
|
||||
version = "2.32.0"
|
||||
default-features = false
|
||||
features = ["suggestions"]
|
||||
|
||||
[features]
|
||||
simd-accel = ["grep-searcher/simd-accel"]
|
||||
pcre2 = ["grep-pcre2"]
|
||||
|
@@ -29,7 +29,7 @@ thread_local = "0.3.6"
|
||||
walkdir = "2.2.7"
|
||||
|
||||
[target.'cfg(windows)'.dependencies.winapi-util]
|
||||
version = "0.1.2"
|
||||
version = "0.1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.0.5"
|
||||
|
@@ -22,7 +22,6 @@ use gitignore::{self, Gitignore, GitignoreBuilder};
|
||||
use pathutil::{is_hidden, strip_prefix};
|
||||
use overrides::{self, Override};
|
||||
use types::{self, Types};
|
||||
use walk::DirEntry;
|
||||
use {Error, Match, PartialErrorBuilder};
|
||||
|
||||
/// IgnoreMatch represents information about where a match came from when using
|
||||
@@ -74,8 +73,6 @@ struct IgnoreOptions {
|
||||
git_ignore: bool,
|
||||
/// Whether to read .git/info/exclude files.
|
||||
git_exclude: bool,
|
||||
/// Whether to ignore files case insensitively
|
||||
ignore_case_insensitive: bool,
|
||||
}
|
||||
|
||||
/// Ignore is a matcher useful for recursively walking one or more directories.
|
||||
@@ -228,11 +225,7 @@ impl Ignore {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (m, err) =
|
||||
create_gitignore(
|
||||
&dir,
|
||||
&self.0.custom_ignore_filenames,
|
||||
self.0.opts.ignore_case_insensitive,
|
||||
);
|
||||
create_gitignore(&dir, &self.0.custom_ignore_filenames);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
@@ -240,12 +233,7 @@ impl Ignore {
|
||||
if !self.0.opts.ignore {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (m, err) =
|
||||
create_gitignore(
|
||||
&dir,
|
||||
&[".ignore"],
|
||||
self.0.opts.ignore_case_insensitive,
|
||||
);
|
||||
let (m, err) = create_gitignore(&dir, &[".ignore"]);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
@@ -253,12 +241,7 @@ impl Ignore {
|
||||
if !self.0.opts.git_ignore {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (m, err) =
|
||||
create_gitignore(
|
||||
&dir,
|
||||
&[".gitignore"],
|
||||
self.0.opts.ignore_case_insensitive,
|
||||
);
|
||||
let (m, err) = create_gitignore(&dir, &[".gitignore"]);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
@@ -266,12 +249,7 @@ impl Ignore {
|
||||
if !self.0.opts.git_exclude {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (m, err) =
|
||||
create_gitignore(
|
||||
&dir,
|
||||
&[".git/info/exclude"],
|
||||
self.0.opts.ignore_case_insensitive,
|
||||
);
|
||||
let (m, err) = create_gitignore(&dir, &[".git/info/exclude"]);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
@@ -307,23 +285,11 @@ impl Ignore {
|
||||
|| has_explicit_ignores
|
||||
}
|
||||
|
||||
/// Like `matched`, but works with a directory entry instead.
|
||||
pub fn matched_dir_entry<'a>(
|
||||
&'a self,
|
||||
dent: &DirEntry,
|
||||
) -> Match<IgnoreMatch<'a>> {
|
||||
let m = self.matched(dent.path(), dent.is_dir());
|
||||
if m.is_none() && self.0.opts.hidden && is_hidden(dent) {
|
||||
return Match::Ignore(IgnoreMatch::hidden());
|
||||
}
|
||||
m
|
||||
}
|
||||
|
||||
/// Returns a match indicating whether the given file path should be
|
||||
/// ignored or not.
|
||||
///
|
||||
/// The match contains information about its origin.
|
||||
fn matched<'a, P: AsRef<Path>>(
|
||||
pub fn matched<'a, P: AsRef<Path>>(
|
||||
&'a self,
|
||||
path: P,
|
||||
is_dir: bool,
|
||||
@@ -364,6 +330,9 @@ impl Ignore {
|
||||
whitelisted = mat;
|
||||
}
|
||||
}
|
||||
if whitelisted.is_none() && self.0.opts.hidden && is_hidden(path) {
|
||||
return Match::Ignore(IgnoreMatch::hidden());
|
||||
}
|
||||
whitelisted
|
||||
}
|
||||
|
||||
@@ -514,7 +483,6 @@ impl IgnoreBuilder {
|
||||
git_global: true,
|
||||
git_ignore: true,
|
||||
git_exclude: true,
|
||||
ignore_case_insensitive: false,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -528,11 +496,7 @@ impl IgnoreBuilder {
|
||||
if !self.opts.git_global {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let mut builder = GitignoreBuilder::new("");
|
||||
builder
|
||||
.case_insensitive(self.opts.ignore_case_insensitive)
|
||||
.unwrap();
|
||||
let (gi, err) = builder.build_global();
|
||||
let (gi, err) = Gitignore::global();
|
||||
if let Some(err) = err {
|
||||
debug!("{}", err);
|
||||
}
|
||||
@@ -663,17 +627,6 @@ impl IgnoreBuilder {
|
||||
self.opts.git_exclude = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Process ignore files case insensitively
|
||||
///
|
||||
/// This is disabled by default.
|
||||
pub fn ignore_case_insensitive(
|
||||
&mut self,
|
||||
yes: bool,
|
||||
) -> &mut IgnoreBuilder {
|
||||
self.opts.ignore_case_insensitive = yes;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new gitignore matcher for the directory given.
|
||||
@@ -685,11 +638,9 @@ impl IgnoreBuilder {
|
||||
pub fn create_gitignore<T: AsRef<OsStr>>(
|
||||
dir: &Path,
|
||||
names: &[T],
|
||||
case_insensitive: bool,
|
||||
) -> (Gitignore, Option<Error>) {
|
||||
let mut builder = GitignoreBuilder::new(dir);
|
||||
let mut errs = PartialErrorBuilder::default();
|
||||
builder.case_insensitive(case_insensitive).unwrap();
|
||||
for name in names {
|
||||
let gipath = dir.join(name.as_ref());
|
||||
errs.maybe_push_ignore_io(builder.add(gipath));
|
||||
@@ -879,7 +830,7 @@ mod tests {
|
||||
#[test]
|
||||
fn errored() {
|
||||
let td = tmpdir("ignore-test-");
|
||||
wfile(td.path().join(".gitignore"), "{foo");
|
||||
wfile(td.path().join(".gitignore"), "f**oo");
|
||||
|
||||
let (_, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
assert!(err.is_some());
|
||||
@@ -888,8 +839,8 @@ mod tests {
|
||||
#[test]
|
||||
fn errored_both() {
|
||||
let td = tmpdir("ignore-test-");
|
||||
wfile(td.path().join(".gitignore"), "{foo");
|
||||
wfile(td.path().join(".ignore"), "{bar");
|
||||
wfile(td.path().join(".gitignore"), "f**oo");
|
||||
wfile(td.path().join(".ignore"), "fo**o");
|
||||
|
||||
let (_, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
assert_eq!(2, partial(err.expect("an error")).len());
|
||||
@@ -899,7 +850,7 @@ mod tests {
|
||||
fn errored_partial() {
|
||||
let td = tmpdir("ignore-test-");
|
||||
mkdirp(td.path().join(".git"));
|
||||
wfile(td.path().join(".gitignore"), "{foo\nbar");
|
||||
wfile(td.path().join(".gitignore"), "f**oo\nbar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
assert!(err.is_some());
|
||||
@@ -909,7 +860,7 @@ mod tests {
|
||||
#[test]
|
||||
fn errored_partial_and_ignore() {
|
||||
let td = tmpdir("ignore-test-");
|
||||
wfile(td.path().join(".gitignore"), "{foo\nbar");
|
||||
wfile(td.path().join(".gitignore"), "f**oo\nbar");
|
||||
wfile(td.path().join(".ignore"), "!bar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
|
@@ -69,7 +69,8 @@ impl Glob {
|
||||
|
||||
/// Returns true if and only if this glob has a `**/` prefix.
|
||||
fn has_doublestar_prefix(&self) -> bool {
|
||||
self.actual.starts_with("**/") || self.actual == "**"
|
||||
self.actual.starts_with("**/")
|
||||
|| (self.actual == "**" && self.is_only_dir)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -126,7 +127,16 @@ impl Gitignore {
|
||||
/// `$XDG_CONFIG_HOME/git/ignore` is read. If `$XDG_CONFIG_HOME` is not
|
||||
/// set or is empty, then `$HOME/.config/git/ignore` is used instead.
|
||||
pub fn global() -> (Gitignore, Option<Error>) {
|
||||
GitignoreBuilder::new("").build_global()
|
||||
match gitconfig_excludes_path() {
|
||||
None => (Gitignore::empty(), None),
|
||||
Some(path) => {
|
||||
if !path.is_file() {
|
||||
(Gitignore::empty(), None)
|
||||
} else {
|
||||
Gitignore::new(path)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new empty gitignore matcher that never matches anything.
|
||||
@@ -349,36 +359,6 @@ impl GitignoreBuilder {
|
||||
})
|
||||
}
|
||||
|
||||
/// Build a global gitignore matcher using the configuration in this
|
||||
/// builder.
|
||||
///
|
||||
/// This consumes ownership of the builder unlike `build` because it
|
||||
/// must mutate the builder to add the global gitignore globs.
|
||||
///
|
||||
/// Note that this ignores the path given to this builder's constructor
|
||||
/// and instead derives the path automatically from git's global
|
||||
/// configuration.
|
||||
pub fn build_global(mut self) -> (Gitignore, Option<Error>) {
|
||||
match gitconfig_excludes_path() {
|
||||
None => (Gitignore::empty(), None),
|
||||
Some(path) => {
|
||||
if !path.is_file() {
|
||||
(Gitignore::empty(), None)
|
||||
} else {
|
||||
let mut errs = PartialErrorBuilder::default();
|
||||
errs.maybe_push_ignore_io(self.add(path));
|
||||
match self.build() {
|
||||
Ok(gi) => (gi, errs.into_error_option()),
|
||||
Err(err) => {
|
||||
errs.push(err);
|
||||
(Gitignore::empty(), errs.into_error_option())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Add each glob from the file path given.
|
||||
///
|
||||
/// The file given should be formatted as a `gitignore` file.
|
||||
@@ -457,6 +437,7 @@ impl GitignoreBuilder {
|
||||
is_whitelist: false,
|
||||
is_only_dir: false,
|
||||
};
|
||||
let mut literal_separator = false;
|
||||
let mut is_absolute = false;
|
||||
if line.starts_with("\\!") || line.starts_with("\\#") {
|
||||
line = &line[1..];
|
||||
@@ -471,6 +452,7 @@ impl GitignoreBuilder {
|
||||
// then the glob can only match the beginning of a path
|
||||
// (relative to the location of gitignore). We achieve this by
|
||||
// simply banning wildcards from matching /.
|
||||
literal_separator = true;
|
||||
line = &line[1..];
|
||||
is_absolute = true;
|
||||
}
|
||||
@@ -483,11 +465,16 @@ impl GitignoreBuilder {
|
||||
line = &line[..i];
|
||||
}
|
||||
}
|
||||
// If there is a literal slash, then we note that so that globbing
|
||||
// doesn't let wildcards match slashes.
|
||||
glob.actual = line.to_string();
|
||||
// If there is a literal slash, then this is a glob that must match the
|
||||
// entire path name. Otherwise, we should let it match anywhere, so use
|
||||
// a **/ prefix.
|
||||
if !is_absolute && !line.chars().any(|c| c == '/') {
|
||||
if is_absolute || line.chars().any(|c| c == '/') {
|
||||
literal_separator = true;
|
||||
}
|
||||
// If there was a slash, then this is a glob that must match the entire
|
||||
// path name. Otherwise, we should let it match anywhere, so use a **/
|
||||
// prefix.
|
||||
if !literal_separator {
|
||||
// ... but only if we don't already have a **/ prefix.
|
||||
if !glob.has_doublestar_prefix() {
|
||||
glob.actual = format!("**/{}", glob.actual);
|
||||
@@ -501,7 +488,7 @@ impl GitignoreBuilder {
|
||||
}
|
||||
let parsed =
|
||||
GlobBuilder::new(&glob.actual)
|
||||
.literal_separator(true)
|
||||
.literal_separator(literal_separator)
|
||||
.case_insensitive(self.case_insensitive)
|
||||
.backslash_escape(true)
|
||||
.build()
|
||||
@@ -518,16 +505,12 @@ impl GitignoreBuilder {
|
||||
|
||||
/// Toggle whether the globs should be matched case insensitively or not.
|
||||
///
|
||||
/// When this option is changed, only globs added after the change will be
|
||||
/// affected.
|
||||
/// When this option is changed, only globs added after the change will be affected.
|
||||
///
|
||||
/// This is disabled by default.
|
||||
pub fn case_insensitive(
|
||||
&mut self,
|
||||
yes: bool,
|
||||
&mut self, yes: bool
|
||||
) -> Result<&mut GitignoreBuilder, Error> {
|
||||
// TODO: This should not return a `Result`. Fix this in the next semver
|
||||
// release.
|
||||
self.case_insensitive = yes;
|
||||
Ok(self)
|
||||
}
|
||||
@@ -708,9 +691,6 @@ mod tests {
|
||||
ignored!(ig39, ROOT, "\\?", "?");
|
||||
ignored!(ig40, ROOT, "\\*", "*");
|
||||
ignored!(ig41, ROOT, "\\a", "a");
|
||||
ignored!(ig42, ROOT, "s*.rs", "sfoo.rs");
|
||||
ignored!(ig43, ROOT, "**", "foo.rs");
|
||||
ignored!(ig44, ROOT, "**/**/*", "a/foo.rs");
|
||||
|
||||
not_ignored!(ignot1, ROOT, "amonths", "months");
|
||||
not_ignored!(ignot2, ROOT, "monthsa", "months");
|
||||
@@ -732,7 +712,6 @@ mod tests {
|
||||
not_ignored!(ignot16, ROOT, "*\n!**/", "foo", true);
|
||||
not_ignored!(ignot17, ROOT, "src/*.rs", "src/grep/src/main.rs");
|
||||
not_ignored!(ignot18, ROOT, "path1/*", "path2/path1/foo");
|
||||
not_ignored!(ignot19, ROOT, "s*.rs", "src/foo.rs");
|
||||
|
||||
fn bytes(s: &str) -> Vec<u8> {
|
||||
s.to_string().into_bytes()
|
||||
|
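The gitignore hunks above spell out how a single ignore line is normalized before being compiled into a glob: a leading `/` anchors the pattern to the gitignore's directory, a literal slash anywhere forces `/` to be treated as a path separator, and a pattern with no slash gets a `**/` prefix so it can match at any depth. The following is only a minimal standalone sketch of that normalization, using a hypothetical `GlobLine` struct and `normalize_gitignore_line` function rather than the crate's internal `Glob` type, and omitting whitelisting, trailing-slash and escape handling.

    // Hypothetical, simplified model of the normalization shown above.
    struct GlobLine {
        actual: String,
        literal_separator: bool,
    }

    fn normalize_gitignore_line(mut line: &str) -> GlobLine {
        let mut is_absolute = false;
        // A leading slash anchors the pattern to the gitignore's own directory.
        if line.starts_with('/') {
            line = &line[1..];
            is_absolute = true;
        }
        // Any literal slash means wildcards must not be allowed to match `/`.
        let literal_separator = is_absolute || line.contains('/');
        let mut actual = line.to_string();
        // With no slash, the pattern may match at any depth, so prepend `**/`
        // unless such a prefix is already present.
        if !literal_separator && !(actual.starts_with("**/") || actual == "**") {
            actual = format!("**/{}", actual);
        }
        GlobLine { actual, literal_separator }
    }

    fn main() {
        for pat in ["foo", "/foo", "src/*.rs", "**"] {
            let g = normalize_gitignore_line(pat);
            println!("{} -> {} (literal_separator: {})", pat, g.actual, g.literal_separator);
        }
    }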
@@ -139,16 +139,13 @@ impl OverrideBuilder {
|
||||
}
|
||||
|
||||
/// Toggle whether the globs should be matched case insensitively or not.
|
||||
///
|
||||
///
|
||||
/// When this option is changed, only globs added after the change will be affected.
|
||||
///
|
||||
/// This is disabled by default.
|
||||
pub fn case_insensitive(
|
||||
&mut self,
|
||||
yes: bool,
|
||||
&mut self, yes: bool
|
||||
) -> Result<&mut OverrideBuilder, Error> {
|
||||
// TODO: This should not return a `Result`. Fix this in the next semver
|
||||
// release.
|
||||
self.builder.case_insensitive(yes)?;
|
||||
Ok(self)
|
||||
}
|
||||
|
@@ -1,56 +1,22 @@
|
||||
use std::ffi::OsStr;
|
||||
use std::path::Path;
|
||||
|
||||
use walk::DirEntry;
|
||||
|
||||
/// Returns true if and only if this entry is considered to be hidden.
|
||||
///
|
||||
/// This only returns true if the base name of the path starts with a `.`.
|
||||
///
|
||||
/// On Unix, this implements a more optimized check.
|
||||
/// Returns true if and only if this file path is considered to be hidden.
|
||||
#[cfg(unix)]
|
||||
pub fn is_hidden(dent: &DirEntry) -> bool {
|
||||
pub fn is_hidden<P: AsRef<Path>>(path: P) -> bool {
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
if let Some(name) = file_name(dent.path()) {
|
||||
if let Some(name) = file_name(path.as_ref()) {
|
||||
name.as_bytes().get(0) == Some(&b'.')
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry is considered to be hidden.
|
||||
///
|
||||
/// On Windows, this returns true if one of the following is true:
|
||||
///
|
||||
/// * The base name of the path starts with a `.`.
|
||||
/// * The file attributes have the `HIDDEN` property set.
|
||||
#[cfg(windows)]
|
||||
pub fn is_hidden(dent: &DirEntry) -> bool {
|
||||
use std::os::windows::fs::MetadataExt;
|
||||
use winapi_util::file;
|
||||
|
||||
// This looks like we're doing an extra stat call, but on Windows, the
|
||||
// directory traverser reuses the metadata retrieved from each directory
|
||||
// entry and stores it on the DirEntry itself. So this is "free."
|
||||
if let Ok(md) = dent.metadata() {
|
||||
if file::is_hidden(md.file_attributes() as u64) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
if let Some(name) = file_name(dent.path()) {
|
||||
name.to_str().map(|s| s.starts_with(".")).unwrap_or(false)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry is considered to be hidden.
|
||||
///
|
||||
/// This only returns true if the base name of the path starts with a `.`.
|
||||
#[cfg(not(any(unix, windows)))]
|
||||
pub fn is_hidden(dent: &DirEntry) -> bool {
|
||||
if let Some(name) = file_name(dent.path()) {
|
||||
/// Returns true if and only if this file path is considered to be hidden.
|
||||
#[cfg(not(unix))]
|
||||
pub fn is_hidden<P: AsRef<Path>>(path: P) -> bool {
|
||||
if let Some(name) = file_name(path.as_ref()) {
|
||||
name.to_str().map(|s| s.starts_with(".")).unwrap_or(false)
|
||||
} else {
|
||||
false
|
||||
|
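The pathutil doc comments above describe the hidden-file check: on Unix a path is hidden when its base name starts with a `.`, while the Windows variant additionally consults the HIDDEN file attribute on the directory entry's metadata. Below is a rough sketch of the name-based part of that check only; `is_hidden_name` is a made-up helper, and the attribute-based Windows path is deliberately left out.

    use std::path::Path;

    // Returns true if the path's base name starts with a dot.
    // (The Windows file-attribute check from the diff above is omitted.)
    fn is_hidden_name<P: AsRef<Path>>(path: P) -> bool {
        path.as_ref()
            .file_name()
            .and_then(|name| name.to_str())
            .map(|name| name.starts_with('.'))
            .unwrap_or(false)
    }

    fn main() {
        assert!(is_hidden_name(".gitignore"));
        assert!(is_hidden_name("some/dir/.hidden"));
        assert!(!is_hidden_name("src/main.rs"));
        println!("ok");
    }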
@@ -108,9 +108,8 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("awk", &["*.awk"]),
|
||||
("bazel", &["*.bzl", "WORKSPACE", "BUILD", "BUILD.bazel"]),
|
||||
("bitbake", &["*.bb", "*.bbappend", "*.bbclass", "*.conf", "*.inc"]),
|
||||
("brotli", &["*.br"]),
|
||||
("buildstream", &["*.bst"]),
|
||||
("bzip2", &["*.bz2", "*.tbz2"]),
|
||||
("bzip2", &["*.bz2"]),
|
||||
("c", &["*.c", "*.h", "*.H", "*.cats"]),
|
||||
("cabal", &["*.cabal"]),
|
||||
("cbor", &["*.cbor"]),
|
||||
@@ -148,7 +147,7 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("fsharp", &["*.fs", "*.fsx", "*.fsi"]),
|
||||
("gn", &["*.gn", "*.gni"]),
|
||||
("go", &["*.go"]),
|
||||
("gzip", &["*.gz", "*.tgz"]),
|
||||
("gzip", &["*.gz"]),
|
||||
("groovy", &["*.groovy", "*.gradle"]),
|
||||
("h", &["*.h", "*.hpp"]),
|
||||
("hbs", &["*.hbs"]),
|
||||
@@ -283,7 +282,7 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
]),
|
||||
("taskpaper", &["*.taskpaper"]),
|
||||
("tcl", &["*.tcl"]),
|
||||
("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib", "*.dtx", "*.ins"]),
|
||||
("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib"]),
|
||||
("textile", &["*.textile"]),
|
||||
("thrift", &["*.thrift"]),
|
||||
("tf", &["*.tf"]),
|
||||
@@ -300,10 +299,9 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("wiki", &["*.mediawiki", "*.wiki"]),
|
||||
("webidl", &["*.idl", "*.webidl", "*.widl"]),
|
||||
("xml", &["*.xml", "*.xml.dist"]),
|
||||
("xz", &["*.xz", "*.txz"]),
|
||||
("xz", &["*.xz"]),
|
||||
("yacc", &["*.y"]),
|
||||
("yaml", &["*.yaml", "*.yml"]),
|
||||
("zig", &["*.zig"]),
|
||||
("zsh", &[
|
||||
".zshenv", "zshenv",
|
||||
".zlogin", "zlogin",
|
||||
@@ -312,7 +310,6 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
".zshrc", "zshrc",
|
||||
"*.zsh",
|
||||
]),
|
||||
("zstd", &["*.zst", "*.zstd"]),
|
||||
];
|
||||
|
||||
/// Glob represents a single glob in a set of file type definitions.
|
||||
@@ -351,18 +348,6 @@ impl<'a> Glob<'a> {
|
||||
fn unmatched() -> Glob<'a> {
|
||||
Glob(GlobInner::UnmatchedIgnore)
|
||||
}
|
||||
|
||||
/// Return the file type definition that matched, if one exists. A file type
|
||||
/// definition always exists when a specific definition matches a file
|
||||
/// path.
|
||||
pub fn file_type_def(&self) -> Option<&FileTypeDef> {
|
||||
match self {
|
||||
Glob(GlobInner::UnmatchedIgnore) => None,
|
||||
Glob(GlobInner::Matched { def, .. }) => {
|
||||
Some(def)
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A single file type definition.
|
||||
|
@@ -99,7 +99,7 @@ impl DirEntry {
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry points to a directory.
|
||||
pub(crate) fn is_dir(&self) -> bool {
|
||||
fn is_dir(&self) -> bool {
|
||||
self.dent.is_dir()
|
||||
}
|
||||
|
||||
@@ -764,14 +764,6 @@ impl WalkBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Process ignore files case insensitively
|
||||
///
|
||||
/// This is disabled by default.
|
||||
pub fn ignore_case_insensitive(&mut self, yes: bool) -> &mut WalkBuilder {
|
||||
self.ig_builder.ignore_case_insensitive(yes);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set a function for sorting directory entries by their path.
|
||||
///
|
||||
/// If a compare function is set, the resulting iterator will return all
|
||||
@@ -883,17 +875,16 @@ impl Walk {
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
if should_skip_entry(&self.ig, ent) {
|
||||
return Ok(true);
|
||||
}
|
||||
if self.max_filesize.is_some() && !ent.is_dir() {
|
||||
return Ok(skip_filesize(
|
||||
self.max_filesize.unwrap(),
|
||||
ent.path(),
|
||||
&ent.metadata().ok(),
|
||||
));
|
||||
}
|
||||
Ok(false)
|
||||
let is_dir = ent.file_type().map_or(false, |ft| ft.is_dir());
|
||||
let max_size = self.max_filesize;
|
||||
let should_skip_path = skip_path(&self.ig, ent.path(), is_dir);
|
||||
let should_skip_filesize = if !is_dir && max_size.is_some() {
|
||||
skip_filesize(max_size.unwrap(), ent.path(), &ent.metadata().ok())
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
Ok(should_skip_path || should_skip_filesize)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1421,11 +1412,13 @@ impl Worker {
|
||||
return WalkState::Continue;
|
||||
}
|
||||
}
|
||||
let should_skip_path = should_skip_entry(ig, &dent);
|
||||
let is_dir = dent.is_dir();
|
||||
let max_size = self.max_filesize;
|
||||
let should_skip_path = skip_path(ig, dent.path(), is_dir);
|
||||
let should_skip_filesize =
|
||||
if self.max_filesize.is_some() && !dent.is_dir() {
|
||||
if !is_dir && max_size.is_some() {
|
||||
skip_filesize(
|
||||
self.max_filesize.unwrap(),
|
||||
max_size.unwrap(),
|
||||
dent.path(),
|
||||
&dent.metadata().ok(),
|
||||
)
|
||||
@@ -1608,16 +1601,17 @@ fn skip_filesize(
|
||||
}
|
||||
}
|
||||
|
||||
fn should_skip_entry(
|
||||
fn skip_path(
|
||||
ig: &Ignore,
|
||||
dent: &DirEntry,
|
||||
path: &Path,
|
||||
is_dir: bool,
|
||||
) -> bool {
|
||||
let m = ig.matched_dir_entry(dent);
|
||||
let m = ig.matched(path, is_dir);
|
||||
if m.is_ignore() {
|
||||
debug!("ignoring {}: {:?}", dent.path().display(), m);
|
||||
debug!("ignoring {}: {:?}", path.display(), m);
|
||||
true
|
||||
} else if m.is_whitelist() {
|
||||
debug!("whitelisting {}: {:?}", dent.path().display(), m);
|
||||
debug!("whitelisting {}: {:?}", path.display(), m);
|
||||
false
|
||||
} else {
|
||||
false
|
||||
|
@@ -1 +0,0 @@
|
||||
disable_all_formatting = true
|
59 src/app.rs
@@ -571,7 +571,6 @@ pub fn all_args_and_flags() -> Vec<RGArg> {
|
||||
flag_iglob(&mut args);
|
||||
flag_ignore_case(&mut args);
|
||||
flag_ignore_file(&mut args);
|
||||
flag_ignore_file_case_insensitive(&mut args);
|
||||
flag_invert_match(&mut args);
|
||||
flag_json(&mut args);
|
||||
flag_line_buffered(&mut args);
|
||||
@@ -586,7 +585,6 @@ pub fn all_args_and_flags() -> Vec<RGArg> {
|
||||
flag_multiline_dotall(&mut args);
|
||||
flag_no_config(&mut args);
|
||||
flag_no_ignore(&mut args);
|
||||
flag_no_ignore_dot(&mut args);
|
||||
flag_no_ignore_global(&mut args);
|
||||
flag_no_ignore_messages(&mut args);
|
||||
flag_no_ignore_parent(&mut args);
|
||||
@@ -982,17 +980,10 @@ fn flag_encoding(args: &mut Vec<RGArg>) {
|
||||
const LONG: &str = long!("\
|
||||
Specify the text encoding that ripgrep will use on all files searched. The
|
||||
default value is 'auto', which will cause ripgrep to do a best effort automatic
|
||||
detection of encoding on a per-file basis. Automatic detection in this case
|
||||
only applies to files that begin with a UTF-8 or UTF-16 byte-order mark (BOM).
|
||||
No other automatic detection is performed. One can also specify 'none' which
|
||||
will then completely disable BOM sniffing and always result in searching the
|
||||
raw bytes, including a BOM if it's present, regardless of its encoding.
|
||||
|
||||
Other supported values can be found in the list of labels here:
|
||||
detection of encoding on a per-file basis. Other supported values can be found
|
||||
in the list of labels here:
|
||||
https://encoding.spec.whatwg.org/#concept-encoding-get
|
||||
|
||||
For more details on encoding and how ripgrep deals with it, see GUIDE.md.
|
||||
|
||||
This flag can be disabled with --no-encoding.
|
||||
");
|
||||
let arg = RGArg::flag("encoding", "ENCODING").short("E")
|
||||
@@ -1218,26 +1209,6 @@ directly on the command line, then used -g instead.
|
||||
args.push(arg);
|
||||
}
|
||||
|
||||
fn flag_ignore_file_case_insensitive(args: &mut Vec<RGArg>) {
|
||||
const SHORT: &str = "Process ignore files case insensitively.";
|
||||
const LONG: &str = long!("\
|
||||
Process ignore files (.gitignore, .ignore, etc.) case insensitively. Note that
|
||||
this comes with a performance penalty and is most useful on case insensitive
|
||||
file systems (such as Windows).
|
||||
|
||||
This flag can be disabled with the --no-ignore-file-case-insensitive flag.
|
||||
");
|
||||
let arg = RGArg::switch("ignore-file-case-insensitive")
|
||||
.help(SHORT).long_help(LONG)
|
||||
.overrides("no-ignore-file-case-insensitive");
|
||||
args.push(arg);
|
||||
|
||||
let arg = RGArg::switch("no-ignore-file-case-insensitive")
|
||||
.hidden()
|
||||
.overrides("ignore-file-case-insensitive");
|
||||
args.push(arg);
|
||||
}
|
||||
|
||||
fn flag_invert_match(args: &mut Vec<RGArg>) {
|
||||
const SHORT: &str = "Invert matching.";
|
||||
const LONG: &str = long!("\
|
||||
@@ -1565,7 +1536,7 @@ fn flag_no_ignore(args: &mut Vec<RGArg>) {
|
||||
const SHORT: &str = "Don't respect ignore files.";
|
||||
const LONG: &str = long!("\
|
||||
Don't respect ignore files (.gitignore, .ignore, etc.). This implies
|
||||
--no-ignore-parent, --no-ignore-dot and --no-ignore-vcs.
|
||||
--no-ignore-parent and --no-ignore-vcs.
|
||||
|
||||
This flag can be disabled with the --ignore flag.
|
||||
");
|
||||
@@ -1580,24 +1551,6 @@ This flag can be disabled with the --ignore flag.
|
||||
args.push(arg);
|
||||
}
|
||||
|
||||
fn flag_no_ignore_dot(args: &mut Vec<RGArg>) {
|
||||
const SHORT: &str = "Don't respect .ignore files.";
|
||||
const LONG: &str = long!("\
|
||||
Don't respect .ignore files.
|
||||
|
||||
This flag can be disabled with the --ignore-dot flag.
|
||||
");
|
||||
let arg = RGArg::switch("no-ignore-dot")
|
||||
.help(SHORT).long_help(LONG)
|
||||
.overrides("ignore-dot");
|
||||
args.push(arg);
|
||||
|
||||
let arg = RGArg::switch("ignore-dot")
|
||||
.hidden()
|
||||
.overrides("no-ignore-dot");
|
||||
args.push(arg);
|
||||
}
|
||||
|
||||
fn flag_no_ignore_global(args: &mut Vec<RGArg>) {
|
||||
const SHORT: &str = "Don't respect global ignore files.";
|
||||
const LONG: &str = long!("\
|
||||
@@ -2046,9 +1999,9 @@ This flag can be used with the -o/--only-matching flag.
|
||||
fn flag_search_zip(args: &mut Vec<RGArg>) {
|
||||
const SHORT: &str = "Search in compressed files.";
|
||||
const LONG: &str = long!("\
|
||||
Search in compressed files. Currently gzip, bzip2, xz, LZ4, LZMA, Brotli and
|
||||
Zstd files are supported. This option expects the decompression binaries to be
|
||||
available in your PATH.
|
||||
Search in compressed files. Currently gz, bz2, xz, lzma and lz4 files are
|
||||
supported. This option expects the decompression binaries to be available in
|
||||
your PATH.
|
||||
|
||||
This flag can be disabled with --no-search-zip.
|
||||
");
|
||||
|
168 src/args.rs
@@ -1,10 +1,9 @@
|
||||
use std::cmp;
|
||||
use std::env;
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::ffi::OsStr;
|
||||
use std::fs;
|
||||
use std::io::{self, Write};
|
||||
use std::io;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process;
|
||||
use std::sync::Arc;
|
||||
use std::time::SystemTime;
|
||||
|
||||
@@ -131,7 +130,7 @@ impl Args {
|
||||
// trying to parse config files. If a config file exists and has
|
||||
// arguments, then we re-parse argv, otherwise we just use the matches
|
||||
// we have here.
|
||||
let early_matches = ArgMatches::new(clap_matches(env::args_os())?);
|
||||
let early_matches = ArgMatches::new(app::app().get_matches());
|
||||
set_messages(!early_matches.is_present("no-messages"));
|
||||
set_ignore_messages(!early_matches.is_present("no-ignore-messages"));
|
||||
|
||||
@@ -146,7 +145,7 @@ impl Args {
|
||||
log::set_max_level(log::LevelFilter::Warn);
|
||||
}
|
||||
|
||||
let matches = early_matches.reconfigure()?;
|
||||
let matches = early_matches.reconfigure();
|
||||
// The logging level may have changed if we brought in additional
|
||||
// arguments from a configuration file, so recheck it and set the log
|
||||
// level as appropriate.
|
||||
@@ -268,11 +267,6 @@ impl Args {
|
||||
Ok(builder.build(wtr))
|
||||
}
|
||||
|
||||
/// Returns true if and only if ripgrep should be "quiet."
|
||||
pub fn quiet(&self) -> bool {
|
||||
self.matches().is_present("quiet")
|
||||
}
|
||||
|
||||
/// Returns true if and only if the search should quit after finding the
|
||||
/// first match.
|
||||
pub fn quit_after_match(&self) -> Result<bool> {
|
||||
@@ -483,37 +477,6 @@ impl SortByKind {
|
||||
}
|
||||
}
|
||||
|
||||
/// Encoding mode the searcher will use.
|
||||
#[derive(Clone, Debug)]
|
||||
enum EncodingMode {
|
||||
/// Use an explicit encoding forcefully, but let BOM sniffing override it.
|
||||
Some(Encoding),
|
||||
/// Use only BOM sniffing to auto-detect an encoding.
|
||||
Auto,
|
||||
/// Use no explicit encoding and disable all BOM sniffing. This will
|
||||
/// always result in searching the raw bytes, regardless of their
|
||||
/// true encoding.
|
||||
Disabled,
|
||||
}
|
||||
|
||||
impl EncodingMode {
|
||||
/// Checks if an explicit encoding has been set. Returns false for
|
||||
/// automatic BOM sniffing and no sniffing.
|
||||
///
|
||||
/// This is only used to determine whether PCRE2 needs to have its own
|
||||
/// UTF-8 checking enabled. If we have an explicit encoding set, then
|
||||
/// we're always guaranteed to get UTF-8, so we can disable PCRE2's check.
|
||||
/// Otherwise, we have no such guarantee, and must enable PCRE2' UTF-8
|
||||
/// check.
|
||||
#[cfg(feature = "pcre2")]
|
||||
fn has_explicit_encoding(&self) -> bool {
|
||||
match self {
|
||||
EncodingMode::Some(_) => true,
|
||||
_ => false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ArgMatches {
|
||||
/// Create an ArgMatches from clap's parse result.
|
||||
fn new(clap_matches: clap::ArgMatches<'static>) -> ArgMatches {
|
||||
@@ -527,19 +490,19 @@ impl ArgMatches {
|
||||
///
|
||||
/// If there are no additional arguments from the environment (e.g., a
|
||||
/// config file), then the given matches are returned as is.
|
||||
fn reconfigure(self) -> Result<ArgMatches> {
|
||||
fn reconfigure(self) -> ArgMatches {
|
||||
// If the end user says no config, then respect it.
|
||||
if self.is_present("no-config") {
|
||||
log::debug!(
|
||||
"not reading config files because --no-config is present"
|
||||
);
|
||||
return Ok(self);
|
||||
return self;
|
||||
}
|
||||
// If the user wants ripgrep to use a config file, then parse args
|
||||
// from that first.
|
||||
let mut args = config::args();
|
||||
if args.is_empty() {
|
||||
return Ok(self);
|
||||
return self;
|
||||
}
|
||||
let mut cliargs = env::args_os();
|
||||
if let Some(bin) = cliargs.next() {
|
||||
@@ -547,7 +510,7 @@ impl ArgMatches {
|
||||
}
|
||||
args.extend(cliargs);
|
||||
log::debug!("final argv: {:?}", args);
|
||||
Ok(ArgMatches(clap_matches(args)?))
|
||||
ArgMatches::new(app::app().get_matches_from(args))
|
||||
}
|
||||
|
||||
/// Convert the result of parsing CLI arguments into ripgrep's higher level
|
||||
@@ -681,7 +644,7 @@ impl ArgMatches {
|
||||
}
|
||||
if self.pcre2_unicode() {
|
||||
builder.utf(true).ucp(true);
|
||||
if self.encoding()?.has_explicit_encoding() {
|
||||
if self.encoding()?.is_some() {
|
||||
// SAFETY: If an encoding was specified, then we're guaranteed
|
||||
// to get valid UTF-8, so we can disable PCRE2's UTF checking.
|
||||
// (Feeding invalid UTF-8 to PCRE2 is undefined behavior.)
|
||||
@@ -797,16 +760,8 @@ impl ArgMatches {
|
||||
.after_context(ctx_after)
|
||||
.passthru(self.is_present("passthru"))
|
||||
.memory_map(self.mmap_choice(paths))
|
||||
.binary_detection(self.binary_detection());
|
||||
match self.encoding()? {
|
||||
EncodingMode::Some(enc) => {
|
||||
builder.encoding(Some(enc));
|
||||
}
|
||||
EncodingMode::Auto => {} // default for the searcher
|
||||
EncodingMode::Disabled => {
|
||||
builder.bom_sniffing(false);
|
||||
}
|
||||
}
|
||||
.binary_detection(self.binary_detection())
|
||||
.encoding(self.encoding()?);
|
||||
Ok(builder.build())
|
||||
}
|
||||
|
||||
@@ -831,16 +786,18 @@ impl ArgMatches {
|
||||
.max_filesize(self.max_file_size()?)
|
||||
.threads(self.threads()?)
|
||||
.same_file_system(self.is_present("one-file-system"))
|
||||
.skip_stdout(!self.is_present("files"))
|
||||
.skip_stdout(true)
|
||||
.overrides(self.overrides()?)
|
||||
.types(self.types()?)
|
||||
.hidden(!self.hidden())
|
||||
.parents(!self.no_ignore_parent())
|
||||
.ignore(!self.no_ignore_dot())
|
||||
.git_global(!self.no_ignore_vcs() && !self.no_ignore_global())
|
||||
.git_ignore(!self.no_ignore_vcs())
|
||||
.git_exclude(!self.no_ignore_vcs())
|
||||
.ignore_case_insensitive(self.ignore_file_case_insensitive());
|
||||
.ignore(!self.no_ignore())
|
||||
.git_global(
|
||||
!self.no_ignore()
|
||||
&& !self.no_ignore_vcs()
|
||||
&& !self.no_ignore_global())
|
||||
.git_ignore(!self.no_ignore() && !self.no_ignore_vcs())
|
||||
.git_exclude(!self.no_ignore() && !self.no_ignore_vcs());
|
||||
if !self.no_ignore() {
|
||||
builder.add_custom_ignore_filename(".rgignore");
|
||||
}
|
||||
@@ -991,30 +948,24 @@ impl ArgMatches {
|
||||
u64_to_usize("dfa-size-limit", r)
|
||||
}
|
||||
|
||||
/// Returns the encoding mode to use.
|
||||
/// Returns the type of encoding to use.
|
||||
///
|
||||
/// This only returns an encoding if one is explicitly specified. Otherwise
|
||||
/// if set to automatic, the Searcher will do BOM sniffing for UTF-16
|
||||
/// and transcode seamlessly. If disabled, no BOM sniffing nor transcoding
|
||||
/// will occur.
|
||||
fn encoding(&self) -> Result<EncodingMode> {
|
||||
/// This only returns an encoding if one is explicitly specified. When no
|
||||
/// encoding is present, the Searcher will still do BOM sniffing for UTF-16
|
||||
/// and transcode seamlessly.
|
||||
fn encoding(&self) -> Result<Option<Encoding>> {
|
||||
if self.is_present("no-encoding") {
|
||||
return Ok(EncodingMode::Auto);
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let label = match self.value_of_lossy("encoding") {
|
||||
None if self.pcre2_unicode() => "utf-8".to_string(),
|
||||
None => return Ok(EncodingMode::Auto),
|
||||
None => return Ok(None),
|
||||
Some(label) => label,
|
||||
};
|
||||
|
||||
if label == "auto" {
|
||||
return Ok(EncodingMode::Auto);
|
||||
} else if label == "none" {
|
||||
return Ok(EncodingMode::Disabled);
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
Ok(EncodingMode::Some(Encoding::new(&label)?))
|
||||
Ok(Some(Encoding::new(&label)?))
|
||||
}
|
||||
|
||||
/// Return the file separator to use based on the CLI configuration.
|
||||
@@ -1052,11 +1003,6 @@ impl ArgMatches {
|
||||
self.is_present("hidden") || self.unrestricted_count() >= 2
|
||||
}
|
||||
|
||||
/// Returns true if ignore files should be processed case insensitively.
|
||||
fn ignore_file_case_insensitive(&self) -> bool {
|
||||
self.is_present("ignore-file-case-insensitive")
|
||||
}
|
||||
|
||||
/// Return all of the ignore file paths given on the command line.
|
||||
fn ignore_paths(&self) -> Vec<PathBuf> {
|
||||
let paths = match self.values_of_os("ignore-file") {
|
||||
@@ -1151,11 +1097,6 @@ impl ArgMatches {
|
||||
self.is_present("no-ignore") || self.unrestricted_count() >= 1
|
||||
}
|
||||
|
||||
/// Returns true if .ignore files should be ignored.
|
||||
fn no_ignore_dot(&self) -> bool {
|
||||
self.is_present("no-ignore-dot") || self.no_ignore()
|
||||
}
|
||||
|
||||
/// Returns true if global ignore files should be ignored.
|
||||
fn no_ignore_global(&self) -> bool {
|
||||
self.is_present("no-ignore-global") || self.no_ignore()
|
||||
@@ -1202,7 +1143,7 @@ impl ArgMatches {
|
||||
builder.add(&glob)?;
|
||||
}
|
||||
// This only enables case insensitivity for subsequent globs.
|
||||
builder.case_insensitive(true).unwrap();
|
||||
builder.case_insensitive(true)?;
|
||||
for glob in self.values_of_lossy_vec("iglob") {
|
||||
builder.add(&glob)?;
|
||||
}
|
||||
@@ -1316,15 +1257,9 @@ impl ArgMatches {
|
||||
if let Some(paths) = self.values_of_os("file") {
|
||||
for path in paths {
|
||||
if path == "-" {
|
||||
pats.extend(cli::patterns_from_stdin()?
|
||||
.into_iter()
|
||||
.map(|p| self.pattern_from_string(p))
|
||||
);
|
||||
pats.extend(cli::patterns_from_stdin()?);
|
||||
} else {
|
||||
pats.extend(cli::patterns_from_path(path)?
|
||||
.into_iter()
|
||||
.map(|p| self.pattern_from_string(p))
|
||||
);
|
||||
pats.extend(cli::patterns_from_path(path)?);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1353,17 +1288,13 @@ impl ArgMatches {
|
||||
/// Converts a &str pattern to a String pattern. The pattern is escaped
|
||||
/// if -F/--fixed-strings is set.
|
||||
fn pattern_from_str(&self, pat: &str) -> String {
|
||||
self.pattern_from_string(pat.to_string())
|
||||
}
|
||||
let litpat = self.pattern_literal(pat.to_string());
|
||||
let s = self.pattern_line(litpat);
|
||||
|
||||
/// Applies additional processing on the given pattern if necessary
|
||||
/// (such as escaping meta characters or turning it into a line regex).
|
||||
fn pattern_from_string(&self, pat: String) -> String {
|
||||
let pat = self.pattern_line(self.pattern_literal(pat));
|
||||
if pat.is_empty() {
|
||||
if s.is_empty() {
|
||||
self.pattern_empty()
|
||||
} else {
|
||||
pat
|
||||
s
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1679,32 +1610,3 @@ where G: Fn(&fs::Metadata) -> io::Result<SystemTime>
|
||||
t1.cmp(&t2)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a clap matches object if the given arguments parse successfully.
|
||||
///
|
||||
/// Otherwise, if an error occurred, then it is returned unless the error
|
||||
/// corresponds to a `--help` or `--version` request. In which case, the
|
||||
/// corresponding output is printed and the current process is exited
|
||||
/// successfully.
|
||||
fn clap_matches<I, T>(
|
||||
args: I,
|
||||
) -> Result<clap::ArgMatches<'static>>
|
||||
where I: IntoIterator<Item=T>,
|
||||
T: Into<OsString> + Clone
|
||||
{
|
||||
let err = match app::app().get_matches_from_safe(args) {
|
||||
Ok(matches) => return Ok(matches),
|
||||
Err(err) => err,
|
||||
};
|
||||
if err.use_stderr() {
|
||||
return Err(err.into());
|
||||
}
|
||||
// Explicitly ignore any error returned by writeln!. The most likely error
|
||||
// at this point is a broken pipe error, in which case, we want to ignore
|
||||
// it and exit quietly.
|
||||
//
|
||||
// (This is the point of this helper function. clap's functionality for
|
||||
// doing this will panic on a broken pipe error.)
|
||||
let _ = writeln!(io::stdout(), "{}", err);
|
||||
process::exit(0);
|
||||
}
|
||||
|
@@ -5,11 +5,10 @@
|
||||
use std::env;
|
||||
use std::error::Error;
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::io::{self, BufRead};
|
||||
use std::ffi::OsString;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use bstr::io::BufReadExt;
|
||||
use log;
|
||||
|
||||
use crate::Result;
|
||||
@@ -77,29 +76,62 @@ fn parse<P: AsRef<Path>>(
|
||||
fn parse_reader<R: io::Read>(
|
||||
rdr: R,
|
||||
) -> Result<(Vec<OsString>, Vec<Box<Error>>)> {
|
||||
let bufrdr = io::BufReader::new(rdr);
|
||||
let mut bufrdr = io::BufReader::new(rdr);
|
||||
let (mut args, mut errs) = (vec![], vec![]);
|
||||
let mut line = vec![];
|
||||
let mut line_number = 0;
|
||||
bufrdr.for_byte_line_with_terminator(|line| {
|
||||
while {
|
||||
line.clear();
|
||||
line_number += 1;
|
||||
|
||||
let line = line.trim();
|
||||
bufrdr.read_until(b'\n', &mut line)? > 0
|
||||
} {
|
||||
trim(&mut line);
|
||||
if line.is_empty() || line[0] == b'#' {
|
||||
return Ok(true);
|
||||
continue;
|
||||
}
|
||||
match line.to_os_str() {
|
||||
match bytes_to_os_string(&line) {
|
||||
Ok(osstr) => {
|
||||
args.push(osstr.to_os_string());
|
||||
args.push(osstr);
|
||||
}
|
||||
Err(err) => {
|
||||
errs.push(format!("{}: {}", line_number, err).into());
|
||||
}
|
||||
}
|
||||
Ok(true)
|
||||
})?;
|
||||
}
|
||||
Ok((args, errs))
|
||||
}
|
||||
|
||||
/// Trim the given bytes of whitespace according to the ASCII definition.
|
||||
fn trim(x: &mut Vec<u8>) {
|
||||
let upto = x.iter().take_while(|b| is_space(**b)).count();
|
||||
x.drain(..upto);
|
||||
let revto = x.len() - x.iter().rev().take_while(|b| is_space(**b)).count();
|
||||
x.drain(revto..);
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given byte is an ASCII space character.
|
||||
fn is_space(b: u8) -> bool {
|
||||
b == b'\t'
|
||||
|| b == b'\n'
|
||||
|| b == b'\x0B'
|
||||
|| b == b'\x0C'
|
||||
|| b == b'\r'
|
||||
|| b == b' '
|
||||
}
|
||||
|
||||
/// On Unix, get an OsString from raw bytes.
|
||||
#[cfg(unix)]
|
||||
fn bytes_to_os_string(bytes: &[u8]) -> Result<OsString> {
|
||||
use std::os::unix::ffi::OsStringExt;
|
||||
Ok(OsString::from_vec(bytes.to_vec()))
|
||||
}
|
||||
|
||||
/// On non-Unix (like Windows), require UTF-8.
|
||||
#[cfg(not(unix))]
|
||||
fn bytes_to_os_string(bytes: &[u8]) -> Result<OsString> {
|
||||
String::from_utf8(bytes.to_vec()).map(OsString::from).map_err(From::from)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ffi::OsString;
|
||||
|
50 src/main.rs
@@ -22,37 +22,33 @@ mod subject;
|
||||
type Result<T> = ::std::result::Result<T, Box<::std::error::Error>>;
|
||||
|
||||
fn main() {
|
||||
if let Err(err) = Args::parse().and_then(try_main) {
|
||||
eprintln!("{}", err);
|
||||
process::exit(2);
|
||||
match Args::parse().and_then(try_main) {
|
||||
Ok(true) => process::exit(0),
|
||||
Ok(false) => process::exit(1),
|
||||
Err(err) => {
|
||||
eprintln!("{}", err);
|
||||
process::exit(2);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn try_main(args: Args) -> Result<()> {
|
||||
fn try_main(args: Args) -> Result<bool> {
|
||||
use args::Command::*;
|
||||
|
||||
let matched =
|
||||
match args.command()? {
|
||||
Search => search(&args),
|
||||
SearchParallel => search_parallel(&args),
|
||||
SearchNever => Ok(false),
|
||||
Files => files(&args),
|
||||
FilesParallel => files_parallel(&args),
|
||||
Types => types(&args),
|
||||
}?;
|
||||
if matched && (args.quiet() || !messages::errored()) {
|
||||
process::exit(0)
|
||||
} else if messages::errored() {
|
||||
process::exit(2)
|
||||
} else {
|
||||
process::exit(1)
|
||||
match args.command()? {
|
||||
Search => search(args),
|
||||
SearchParallel => search_parallel(args),
|
||||
SearchNever => Ok(false),
|
||||
Files => files(args),
|
||||
FilesParallel => files_parallel(args),
|
||||
Types => types(args),
|
||||
}
|
||||
}
|
||||
|
||||
/// The top-level entry point for single-threaded search. This recursively
|
||||
/// steps through the file list (current directory by default) and searches
|
||||
/// each file sequentially.
|
||||
fn search(args: &Args) -> Result<bool> {
|
||||
fn search(args: Args) -> Result<bool> {
|
||||
let started_at = Instant::now();
|
||||
let quit_after_match = args.quit_after_match()?;
|
||||
let subject_builder = args.subject_builder();
|
||||
@@ -72,7 +68,7 @@ fn search(args: &Args) -> Result<bool> {
|
||||
if err.kind() == io::ErrorKind::BrokenPipe {
|
||||
break;
|
||||
}
|
||||
err_message!("{}: {}", subject.path().display(), err);
|
||||
message!("{}: {}", subject.path().display(), err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
@@ -95,7 +91,7 @@ fn search(args: &Args) -> Result<bool> {
|
||||
/// The top-level entry point for multi-threaded search. The parallelism is
|
||||
/// itself achieved by the recursive directory traversal. All we need to do is
|
||||
/// feed it a worker for performing a search on each file.
|
||||
fn search_parallel(args: &Args) -> Result<bool> {
|
||||
fn search_parallel(args: Args) -> Result<bool> {
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering::SeqCst;
|
||||
|
||||
@@ -131,7 +127,7 @@ fn search_parallel(args: &Args) -> Result<bool> {
|
||||
let search_result = match searcher.search(&subject) {
|
||||
Ok(search_result) => search_result,
|
||||
Err(err) => {
|
||||
err_message!("{}: {}", subject.path().display(), err);
|
||||
message!("{}: {}", subject.path().display(), err);
|
||||
return WalkState::Continue;
|
||||
}
|
||||
};
|
||||
@@ -148,7 +144,7 @@ fn search_parallel(args: &Args) -> Result<bool> {
|
||||
return WalkState::Quit;
|
||||
}
|
||||
// Otherwise, we continue on our merry way.
|
||||
err_message!("{}: {}", subject.path().display(), err);
|
||||
message!("{}: {}", subject.path().display(), err);
|
||||
}
|
||||
if matched.load(SeqCst) && quit_after_match {
|
||||
WalkState::Quit
|
||||
@@ -173,7 +169,7 @@ fn search_parallel(args: &Args) -> Result<bool> {
|
||||
/// The top-level entry point for listing files without searching them. This
|
||||
/// recursively steps through the file list (current directory by default) and
|
||||
/// prints each path sequentially using a single thread.
|
||||
fn files(args: &Args) -> Result<bool> {
|
||||
fn files(args: Args) -> Result<bool> {
|
||||
let quit_after_match = args.quit_after_match()?;
|
||||
let subject_builder = args.subject_builder();
|
||||
let mut matched = false;
|
||||
@@ -203,7 +199,7 @@ fn files(args: &Args) -> Result<bool> {
|
||||
/// The top-level entry point for listing files without searching them. This
|
||||
/// recursively steps through the file list (current directory by default) and
|
||||
/// prints each path sequentially using multiple threads.
|
||||
fn files_parallel(args: &Args) -> Result<bool> {
|
||||
fn files_parallel(args: Args) -> Result<bool> {
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering::SeqCst;
|
||||
use std::sync::mpsc;
|
||||
@@ -255,7 +251,7 @@ fn files_parallel(args: &Args) -> Result<bool> {
|
||||
}
|
||||
|
||||
/// The top-level entry point for --type-list.
|
||||
fn types(args: &Args) -> Result<bool> {
|
||||
fn types(args: Args) -> Result<bool> {
|
||||
let mut count = 0;
|
||||
let mut stdout = args.stdout();
|
||||
for def in args.type_defs()? {
|
||||
|
@@ -1,10 +1,8 @@
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::atomic::{ATOMIC_BOOL_INIT, AtomicBool, Ordering};

static MESSAGES: AtomicBool = AtomicBool::new(false);
static IGNORE_MESSAGES: AtomicBool = AtomicBool::new(false);
static ERRORED: AtomicBool = AtomicBool::new(false);
static MESSAGES: AtomicBool = ATOMIC_BOOL_INIT;
static IGNORE_MESSAGES: AtomicBool = ATOMIC_BOOL_INIT;

/// Emit a non-fatal error message, unless messages were disabled.
#[macro_export]
macro_rules! message {
($($tt:tt)*) => {
@@ -14,18 +12,6 @@ macro_rules! message {
}
}

/// Like message, but sets ripgrep's "errored" flag, which controls the exit
/// status.
#[macro_export]
macro_rules! err_message {
($($tt:tt)*) => {
crate::messages::set_errored();
message!($($tt)*);
}
}

/// Emit a non-fatal ignore-related error message (like a parse error), unless
/// ignore-messages were disabled.
#[macro_export]
macro_rules! ignore_message {
($($tt:tt)*) => {
@@ -62,13 +48,3 @@ pub fn ignore_messages() -> bool {
pub fn set_ignore_messages(yes: bool) {
IGNORE_MESSAGES.store(yes, Ordering::SeqCst)
}

/// Returns true if and only if ripgrep came across a non-fatal error.
pub fn errored() -> bool {
ERRORED.load(Ordering::SeqCst)
}

/// Indicate that ripgrep has come across a non-fatal error.
pub fn set_errored() {
ERRORED.store(true, Ordering::SeqCst);
}
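Taken together, the err_message!/set_errored hunks above and the main.rs changes earlier in this compare describe ripgrep's exit-status convention: 0 when a match was found (or when a non-fatal error occurred under -q/--quiet), 1 when nothing matched, and 2 when a non-fatal or fatal error occurred. The snippet below is only an illustrative sketch of that decision, with the inputs passed in explicitly rather than read from global state as the real code does.

    // Illustrative only: `matched`, `errored` and `quiet` stand in for the
    // search result, the global ERRORED flag and the -q/--quiet flag.
    fn exit_code(matched: bool, errored: bool, quiet: bool) -> i32 {
        if matched && (quiet || !errored) {
            0
        } else if errored {
            2
        } else {
            1
        }
    }

    fn main() {
        assert_eq!(exit_code(true, false, false), 0);  // match, no error
        assert_eq!(exit_code(true, true, true), 0);    // match + error, but --quiet
        assert_eq!(exit_code(true, true, false), 2);   // match + non-fatal error
        assert_eq!(exit_code(false, false, false), 1); // no match
        assert_eq!(exit_code(false, true, false), 2);  // no match + error
        println!("ok");
    }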
@@ -41,7 +41,7 @@ impl SubjectBuilder {
|
||||
match result {
|
||||
Ok(dent) => self.build(dent),
|
||||
Err(err) => {
|
||||
err_message!("{}", err);
|
||||
message!("{}", err);
|
||||
None
|
||||
}
|
||||
}
|
||||
@@ -127,19 +127,9 @@ impl Subject {
|
||||
self.dent.is_stdin()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this subject points to a directory after
|
||||
/// following symbolic links.
|
||||
/// Returns true if and only if this subject points to a directory.
|
||||
fn is_dir(&self) -> bool {
|
||||
let ft = match self.dent.file_type() {
|
||||
None => return false,
|
||||
Some(ft) => ft,
|
||||
};
|
||||
if ft.is_dir() {
|
||||
return true;
|
||||
}
|
||||
// If this is a symlink, then we want to follow it to determine
|
||||
// whether it's a directory or not.
|
||||
self.dent.path_is_symlink() && self.dent.path().is_dir()
|
||||
self.dent.file_type().map_or(false, |ft| ft.is_dir())
|
||||
}
|
||||
|
||||
/// Returns true if and only if this subject points to a file.
|
||||
|
@@ -1,2 +0,0 @@
Binary file not shown.
@@ -629,51 +629,3 @@ rgtest!(f993_null_data, |dir: Dir, mut cmd: TestCommand| {
|
||||
let expected = "foo\x00bar\x00baz\x00";
|
||||
eqnice!(expected, cmd.stdout());
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1138
|
||||
rgtest!(f1138_no_ignore_dot, |dir: Dir, mut cmd: TestCommand| {
|
||||
dir.create_dir(".git");
|
||||
dir.create(".gitignore", "foo");
|
||||
dir.create(".ignore", "bar");
|
||||
dir.create(".fzf-ignore", "quux");
|
||||
dir.create("foo", "");
|
||||
dir.create("bar", "");
|
||||
dir.create("quux", "");
|
||||
|
||||
cmd.arg("--sort").arg("path").arg("--files");
|
||||
eqnice!("quux\n", cmd.stdout());
|
||||
eqnice!("bar\nquux\n", cmd.arg("--no-ignore-dot").stdout());
|
||||
eqnice!("bar\n", cmd.arg("--ignore-file").arg(".fzf-ignore").stdout());
|
||||
});
|
||||
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1207
|
||||
//
|
||||
// Tests if without encoding 'none' flag null bytes are consumed by automatic
|
||||
// encoding detection.
|
||||
rgtest!(f1207_auto_encoding, |dir: Dir, mut cmd: TestCommand| {
|
||||
dir.create_bytes(
|
||||
"foo",
|
||||
b"\xFF\xFE\x00\x62"
|
||||
);
|
||||
cmd.arg("-a").arg("\\x00").arg("foo");
|
||||
cmd.assert_exit_code(1);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1207
|
||||
//
|
||||
// Tests if encoding 'none' flag does treat file as raw bytes
|
||||
rgtest!(f1207_ignore_encoding, |dir: Dir, mut cmd: TestCommand| {
|
||||
// PCRE2 chokes on this test because it can't search invalid non-UTF-8
|
||||
// and the point of this test is to search raw UTF-16.
|
||||
if dir.is_pcre2() {
|
||||
return;
|
||||
}
|
||||
|
||||
dir.create_bytes(
|
||||
"foo",
|
||||
b"\xFF\xFE\x00\x62"
|
||||
);
|
||||
cmd.arg("--encoding").arg("none").arg("-a").arg("\\x00").arg("foo");
|
||||
eqnice!("\u{FFFD}\u{FFFD}\x00b\n", cmd.stdout());
|
||||
});
|
||||
|
@@ -153,10 +153,7 @@ rgtest!(basic, |dir: Dir, mut cmd: TestCommand| {
|
||||
msgs[1].unwrap_context(),
|
||||
Context {
|
||||
path: Some(Data::text("sherlock")),
|
||||
lines: Data::text(
|
||||
"Holmeses, success in the province of \
|
||||
detective work must always\n",
|
||||
),
|
||||
lines: Data::text("Holmeses, success in the province of detective work must always\n"),
|
||||
line_number: Some(2),
|
||||
absolute_offset: 65,
|
||||
submatches: vec![],
|
||||
@@ -166,10 +163,7 @@ rgtest!(basic, |dir: Dir, mut cmd: TestCommand| {
|
||||
msgs[2].unwrap_match(),
|
||||
Match {
|
||||
path: Some(Data::text("sherlock")),
|
||||
lines: Data::text(
|
||||
"be, to a very large extent, the result of luck. \
|
||||
Sherlock Holmes\n",
|
||||
),
|
||||
lines: Data::text("be, to a very large extent, the result of luck. Sherlock Holmes\n"),
|
||||
line_number: Some(3),
|
||||
absolute_offset: 129,
|
||||
submatches: vec![
|
||||
@@ -218,9 +212,7 @@ rgtest!(notutf8, |dir: Dir, mut cmd: TestCommand| {
|
||||
let contents = &b"quux\xFFbaz"[..];
|
||||
|
||||
// APFS does not support creating files with invalid UTF-8 bytes, so just
|
||||
// skip the test if we can't create our file. Presumably we don't need this
|
||||
// check if we're already skipping it on macOS, but maybe other file
|
||||
// systems won't like this test either?
|
||||
// skip the test if we can't create our file.
|
||||
if !dir.try_create_bytes(OsStr::from_bytes(name), contents).is_ok() {
|
||||
return;
|
||||
}
|
||||
@@ -313,52 +305,3 @@ rgtest!(crlf, |dir: Dir, mut cmd: TestCommand| {
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1095
|
||||
//
|
||||
// This test checks that we don't drop the \r\n in a matching line when --crlf
|
||||
// mode is enabled.
|
||||
rgtest!(r1095_missing_crlf, |dir: Dir, mut cmd: TestCommand| {
|
||||
dir.create("foo", "test\r\n");
|
||||
|
||||
// Check without --crlf flag.
|
||||
let msgs = json_decode(&cmd.arg("--json").arg("test").stdout());
|
||||
assert_eq!(msgs.len(), 4);
|
||||
assert_eq!(msgs[1].unwrap_match().lines, Data::text("test\r\n"));
|
||||
|
||||
// Now check with --crlf flag.
|
||||
let msgs = json_decode(&cmd.arg("--crlf").stdout());
|
||||
assert_eq!(msgs.len(), 4);
|
||||
assert_eq!(msgs[1].unwrap_match().lines, Data::text("test\r\n"));
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1095
|
||||
//
|
||||
// This test checks that we don't return empty submatches when matching a `\n`
|
||||
// in CRLF mode.
|
||||
rgtest!(r1095_crlf_empty_match, |dir: Dir, mut cmd: TestCommand| {
|
||||
dir.create("foo", "test\r\n\n");
|
||||
|
||||
// Check without --crlf flag.
|
||||
let msgs = json_decode(&cmd.arg("-U").arg("--json").arg("\n").stdout());
|
||||
assert_eq!(msgs.len(), 5);
|
||||
|
||||
let m = msgs[1].unwrap_match();
|
||||
assert_eq!(m.lines, Data::text("test\r\n"));
|
||||
assert_eq!(m.submatches[0].m, Data::text("\n"));
|
||||
|
||||
let m = msgs[2].unwrap_match();
|
||||
assert_eq!(m.lines, Data::text("\n"));
|
||||
assert_eq!(m.submatches[0].m, Data::text("\n"));
|
||||
|
||||
// Now check with --crlf flag.
|
||||
let msgs = json_decode(&cmd.arg("--crlf").stdout());
|
||||
|
||||
let m = msgs[1].unwrap_match();
|
||||
assert_eq!(m.lines, Data::text("test\r\n"));
|
||||
assert_eq!(m.submatches[0].m, Data::text("\n"));
|
||||
|
||||
let m = msgs[2].unwrap_match();
|
||||
assert_eq!(m.lines, Data::text("\n"));
|
||||
assert_eq!(m.submatches[0].m, Data::text("\n"));
|
||||
});
|
||||
|
@@ -909,36 +909,6 @@ be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
eqnice!(expected, cmd.stdout());
|
||||
});
|
||||
|
||||
rgtest!(compressed_brotli, |dir: Dir, mut cmd: TestCommand| {
|
||||
if !cmd_exists("brotli") {
|
||||
return;
|
||||
}
|
||||
|
||||
dir.create_bytes("sherlock.br", include_bytes!("./data/sherlock.br"));
|
||||
cmd.arg("-z").arg("Sherlock").arg("sherlock.br");
|
||||
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
eqnice!(expected, cmd.stdout());
|
||||
});
|
||||
|
||||
rgtest!(compressed_zstd, |dir: Dir, mut cmd: TestCommand| {
|
||||
if !cmd_exists("zstd") {
|
||||
return;
|
||||
}
|
||||
|
||||
dir.create_bytes("sherlock.zst", include_bytes!("./data/sherlock.zst"));
|
||||
cmd.arg("-z").arg("Sherlock").arg("sherlock.zst");
|
||||
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
eqnice!(expected, cmd.stdout());
|
||||
});
|
||||
|
||||
rgtest!(compressed_failing_gzip, |dir: Dir, mut cmd: TestCommand| {
|
||||
if !cmd_exists("gzip") {
|
||||
return;
|
||||
|
@@ -568,140 +568,3 @@ rgtest!(r1064, |dir: Dir, mut cmd: TestCommand| {
|
||||
dir.create("input", "abc");
|
||||
eqnice!("input:abc\n", cmd.arg("a(.*c)").stdout());
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1174
|
||||
rgtest!(r1098, |dir: Dir, mut cmd: TestCommand| {
|
||||
dir.create_dir(".git");
|
||||
dir.create(".gitignore", "a**b");
|
||||
dir.create("afoob", "test");
|
||||
cmd.arg("test").assert_err();
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1130
|
||||
rgtest!(r1130, |dir: Dir, mut cmd: TestCommand| {
|
||||
dir.create("foo", "test");
|
||||
eqnice!(
|
||||
"foo\n",
|
||||
cmd.arg("--files-with-matches").arg("test").arg("foo").stdout()
|
||||
);
|
||||
|
||||
let mut cmd = dir.command();
|
||||
eqnice!(
|
||||
"foo\n",
|
||||
cmd.arg("--files-without-match").arg("nada").arg("foo").stdout()
|
||||
);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1159
|
||||
rgtest!(r1159_invalid_flag, |_: Dir, mut cmd: TestCommand| {
|
||||
cmd.arg("--wat").assert_exit_code(2);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1159
|
||||
rgtest!(r1159_exit_status, |dir: Dir, _: TestCommand| {
|
||||
dir.create("foo", "test");
|
||||
|
||||
// search with a match gets 0 exit status.
|
||||
let mut cmd = dir.command();
|
||||
cmd.arg("test").assert_exit_code(0);
|
||||
|
||||
// search with --quiet and a match gets 0 exit status.
|
||||
let mut cmd = dir.command();
|
||||
cmd.arg("-q").arg("test").assert_exit_code(0);
|
||||
|
||||
// search with a match and an error gets 2 exit status.
|
||||
let mut cmd = dir.command();
|
||||
cmd.arg("test").arg("no-file").assert_exit_code(2);
|
||||
|
||||
// search with a match in --quiet mode and an error gets 0 exit status.
|
||||
let mut cmd = dir.command();
|
||||
cmd.arg("-q").arg("test").arg("foo").arg("no-file").assert_exit_code(0);
|
||||
|
||||
// search with no match gets 1 exit status.
|
||||
let mut cmd = dir.command();
|
||||
cmd.arg("nada").assert_exit_code(1);
|
||||
|
||||
// search with --quiet and no match gets 1 exit status.
|
||||
let mut cmd = dir.command();
|
||||
cmd.arg("-q").arg("nada").assert_exit_code(1);
|
||||
|
||||
// search with no match and an error gets 2 exit status.
|
||||
let mut cmd = dir.command();
|
||||
cmd.arg("nada").arg("no-file").assert_exit_code(2);
|
||||
|
||||
// search with no match in --quiet mode and an error gets 2 exit status.
|
||||
let mut cmd = dir.command();
|
||||
cmd.arg("-q").arg("nada").arg("foo").arg("no-file").assert_exit_code(2);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1163
|
||||
rgtest!(r1163, |dir: Dir, mut cmd: TestCommand| {
|
||||
dir.create("bom.txt", "\u{FEFF}test123\ntest123");
|
||||
eqnice!(
|
||||
"bom.txt:test123\nbom.txt:test123\n",
|
||||
cmd.arg("^test123").stdout()
|
||||
);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1164
|
||||
rgtest!(r1164, |dir: Dir, mut cmd: TestCommand| {
|
||||
dir.create_dir(".git");
|
||||
dir.create(".gitignore", "myfile");
|
||||
dir.create("MYFILE", "test");
|
||||
|
||||
cmd.arg("--ignore-file-case-insensitive").arg("test").assert_err();
|
||||
eqnice!(
|
||||
"MYFILE:test\n",
|
||||
cmd.arg("--no-ignore-file-case-insensitive").stdout()
|
||||
);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1173
|
||||
rgtest!(r1173, |dir: Dir, mut cmd: TestCommand| {
|
||||
dir.create_dir(".git");
|
||||
dir.create(".gitignore", "**");
|
||||
dir.create("foo", "test");
|
||||
cmd.arg("test").assert_err();
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1174
|
||||
rgtest!(r1174, |dir: Dir, mut cmd: TestCommand| {
|
||||
dir.create_dir(".git");
|
||||
dir.create(".gitignore", "**/**/*");
|
||||
dir.create_dir("a");
|
||||
dir.create("a/foo", "test");
|
||||
cmd.arg("test").assert_err();
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1176
|
||||
rgtest!(r1176_literal_file, |dir: Dir, mut cmd: TestCommand| {
|
||||
dir.create("patterns", "foo(bar\n");
|
||||
dir.create("test", "foo(bar");
|
||||
|
||||
eqnice!(
|
||||
"foo(bar\n",
|
||||
cmd.arg("-F").arg("-f").arg("patterns").arg("test").stdout()
|
||||
);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1176
|
||||
rgtest!(r1176_line_regex, |dir: Dir, mut cmd: TestCommand| {
|
||||
dir.create("patterns", "foo\n");
|
||||
dir.create("test", "foobar\nfoo\nbarfoo\n");
|
||||
|
||||
eqnice!(
|
||||
"foo\n",
|
||||
cmd.arg("-x").arg("-f").arg("patterns").arg("test").stdout()
|
||||
);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1203
|
||||
rgtest!(r1203_reverse_suffix_literal, |dir: Dir, _: TestCommand| {
|
||||
dir.create("test", "153.230000\n");
|
||||
|
||||
let mut cmd = dir.command();
|
||||
eqnice!("153.230000\n", cmd.arg(r"\d\d\d00").arg("test").stdout());
|
||||
|
||||
let mut cmd = dir.command();
|
||||
eqnice!("153.230000\n", cmd.arg(r"\d\d\d000").arg("test").stdout());
|
||||
});
|
||||
|
@@ -5,12 +5,12 @@ use std::fs::{self, File};
|
||||
use std::io::{self, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::{self, Command};
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize, Ordering};
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
static TEST_DIR: &'static str = "ripgrep-tests";
|
||||
static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
|
||||
static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
|
||||
/// Setup an empty work directory and return a command pointing to the ripgrep
|
||||
/// executable whose CWD is set to the work directory.
|
||||
|