repo: move all source code in crates directory

The top-level listing was just getting a bit too long for my taste. So I
put all of the code in one directory and shrank the large top-level mess
to a small top-level mess.

NOTE: This commit only contains renames. The subsequent commit will
actually make ripgrep build again. We do it this way with the naive hope
that this will make it easier for git history to track the renames.
Sigh.
Author: Andrew Gallant
Date:   2020-02-17 18:19:19 -05:00
Parent: 0bc4f0447b
Commit: fdd8510fdd
113 changed files with 0 additions and 0 deletions

crates/cli/Cargo.toml Normal file

@@ -0,0 +1,26 @@
[package]
name = "grep-cli"
version = "0.1.3" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
Utilities for search oriented command line applications.
"""
documentation = "https://docs.rs/grep-cli"
homepage = "https://github.com/BurntSushi/ripgrep"
repository = "https://github.com/BurntSushi/ripgrep"
readme = "README.md"
keywords = ["regex", "grep", "cli", "utility", "util"]
license = "Unlicense/MIT"
[dependencies]
atty = "0.2.11"
bstr = "0.2.0"
globset = { version = "0.4.3", path = "../globset" }
lazy_static = "1.1.0"
log = "0.4.5"
regex = "1.1"
same-file = "1.0.4"
termcolor = "1.0.4"
[target.'cfg(windows)'.dependencies.winapi-util]
version = "0.1.1"

crates/cli/LICENSE-MIT Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Andrew Gallant
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

crates/cli/README.md Normal file

@@ -0,0 +1,38 @@
grep-cli
--------
A utility library that provides common routines desired in search oriented
command line applications. This includes, but is not limited to, parsing hex
escapes, detecting whether stdin is readable and more. To the extent possible,
this crate strives for compatibility across Windows, macOS and Linux.
[![Linux build status](https://api.travis-ci.org/BurntSushi/ripgrep.svg)](https://travis-ci.org/BurntSushi/ripgrep)
[![Windows build status](https://ci.appveyor.com/api/projects/status/github/BurntSushi/ripgrep?svg=true)](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[![](https://img.shields.io/crates/v/grep-cli.svg)](https://crates.io/crates/grep-cli)
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
### Documentation
[https://docs.rs/grep-cli](https://docs.rs/grep-cli)
**NOTE:** You probably don't want to use this crate directly. Instead, you
should prefer the facade defined in the
[`grep`](https://docs.rs/grep)
crate.
### Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
grep-cli = "0.1"
```
and this to your crate root:
```rust
extern crate grep_cli;
```
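
For a quick taste of the API, here is a sketch that round-trips arbitrary
bytes through the crate's `escape` and `unescape` routines:

```rust
extern crate grep_cli;

fn main() {
    // Escape arbitrary bytes into a printable string...
    let printable = grep_cli::escape(b"foo\xFFbar");
    assert_eq!(r"foo\xFFbar", printable);
    // ...and turn the escaped form back into the original bytes.
    assert_eq!(b"foo\xFFbar".to_vec(), grep_cli::unescape(&printable));
}
```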

crates/cli/UNLICENSE Normal file

@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>

crates/cli/src/decompress.rs Normal file

@@ -0,0 +1,376 @@
use std::ffi::{OsStr, OsString};
use std::fs::File;
use std::io;
use std::path::Path;
use std::process::Command;
use globset::{Glob, GlobSet, GlobSetBuilder};
use process::{CommandError, CommandReader, CommandReaderBuilder};
/// A builder for a matcher that determines which files get decompressed.
#[derive(Clone, Debug)]
pub struct DecompressionMatcherBuilder {
/// The commands for each matching glob.
commands: Vec<DecompressionCommand>,
/// Whether to include the default matching rules.
defaults: bool,
}
/// A representation of a single command for decompressing data
/// out-of-process.
#[derive(Clone, Debug)]
struct DecompressionCommand {
/// The glob that matches this command.
glob: String,
/// The command or binary name.
bin: OsString,
/// The arguments to invoke with the command.
args: Vec<OsString>,
}
impl Default for DecompressionMatcherBuilder {
fn default() -> DecompressionMatcherBuilder {
DecompressionMatcherBuilder::new()
}
}
impl DecompressionMatcherBuilder {
/// Create a new builder for configuring a decompression matcher.
pub fn new() -> DecompressionMatcherBuilder {
DecompressionMatcherBuilder { commands: vec![], defaults: true }
}
/// Build a matcher for determining how to decompress files.
///
/// If there was a problem compiling the matcher, then an error is
/// returned.
pub fn build(&self) -> Result<DecompressionMatcher, CommandError> {
let defaults = if !self.defaults {
vec![]
} else {
default_decompression_commands()
};
let mut glob_builder = GlobSetBuilder::new();
let mut commands = vec![];
for decomp_cmd in defaults.iter().chain(&self.commands) {
let glob = Glob::new(&decomp_cmd.glob).map_err(|err| {
CommandError::io(io::Error::new(io::ErrorKind::Other, err))
})?;
glob_builder.add(glob);
commands.push(decomp_cmd.clone());
}
let globs = glob_builder.build().map_err(|err| {
CommandError::io(io::Error::new(io::ErrorKind::Other, err))
})?;
Ok(DecompressionMatcher { globs, commands })
}
/// When enabled, the default matching rules will be compiled into this
/// matcher before any other associations. When disabled, only the
/// rules explicitly given to this builder will be used.
///
/// This is enabled by default.
pub fn defaults(&mut self, yes: bool) -> &mut DecompressionMatcherBuilder {
self.defaults = yes;
self
}
/// Associates a glob with a command to decompress files matching the glob.
///
/// If multiple globs match the same file, then the most recently added
/// glob takes precedence.
///
/// The syntax for the glob is documented in the
/// [`globset` crate](https://docs.rs/globset/#syntax).
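    ///
    /// # Example
    ///
    /// A sketch of adding a custom rule. The `uncompress` program here is
    /// an assumed example and must exist in the environment for the
    /// resulting command to actually work:
    ///
    /// ```
    /// use grep_cli::DecompressionMatcherBuilder;
    ///
    /// # fn example() -> Result<(), Box<::std::error::Error>> {
    /// let mut builder = DecompressionMatcherBuilder::new();
    /// builder.associate("*.z", "uncompress", &["-c"]);
    /// let matcher = builder.build()?;
    /// assert!(matcher.has_command("file.z"));
    /// # Ok(()) }
    /// ```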
pub fn associate<P, I, A>(
&mut self,
glob: &str,
program: P,
args: I,
) -> &mut DecompressionMatcherBuilder
where
P: AsRef<OsStr>,
I: IntoIterator<Item = A>,
A: AsRef<OsStr>,
{
let glob = glob.to_string();
let bin = program.as_ref().to_os_string();
let args =
args.into_iter().map(|a| a.as_ref().to_os_string()).collect();
self.commands.push(DecompressionCommand { glob, bin, args });
self
}
}
/// A matcher for determining how to decompress files.
#[derive(Clone, Debug)]
pub struct DecompressionMatcher {
/// The set of globs to match. Each glob has a corresponding entry in
/// `commands`. When a glob matches, the corresponding command should be
/// used to perform out-of-process decompression.
globs: GlobSet,
/// The commands for each matching glob.
commands: Vec<DecompressionCommand>,
}
impl Default for DecompressionMatcher {
fn default() -> DecompressionMatcher {
DecompressionMatcher::new()
}
}
impl DecompressionMatcher {
/// Create a new matcher with default rules.
///
/// To add more matching rules, build a matcher with
/// [`DecompressionMatcherBuilder`](struct.DecompressionMatcherBuilder.html).
pub fn new() -> DecompressionMatcher {
DecompressionMatcherBuilder::new()
.build()
.expect("built-in matching rules should always compile")
}
/// Return a pre-built command based on the given file path that can
/// decompress its contents. If no such decompressor is known, then this
/// returns `None`.
///
/// If there are multiple possible commands matching the given path, then
/// the command added last takes precedence.
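    ///
    /// # Example
    ///
    /// The default rules associate `*.gz` files with a `gzip` invocation:
    ///
    /// ```
    /// use grep_cli::DecompressionMatcher;
    ///
    /// let matcher = DecompressionMatcher::new();
    /// assert!(matcher.command("foo.gz").is_some());
    /// assert!(matcher.command("foo.txt").is_none());
    /// ```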
pub fn command<P: AsRef<Path>>(&self, path: P) -> Option<Command> {
for i in self.globs.matches(path).into_iter().rev() {
let decomp_cmd = &self.commands[i];
let mut cmd = Command::new(&decomp_cmd.bin);
cmd.args(&decomp_cmd.args);
return Some(cmd);
}
None
}
/// Returns true if and only if the given file path has at least one
/// matching command to perform decompression on.
pub fn has_command<P: AsRef<Path>>(&self, path: P) -> bool {
self.globs.is_match(path)
}
}
/// Configures and builds a streaming reader for decompressing data.
#[derive(Clone, Debug, Default)]
pub struct DecompressionReaderBuilder {
matcher: DecompressionMatcher,
command_builder: CommandReaderBuilder,
}
impl DecompressionReaderBuilder {
/// Create a new builder with the default configuration.
pub fn new() -> DecompressionReaderBuilder {
DecompressionReaderBuilder::default()
}
/// Build a new streaming reader for decompressing data.
///
/// If decompression is done out-of-process and if there was a problem
/// spawning the process, then its error is logged at the debug level and a
/// passthru reader is returned that does no decompression. This behavior
/// typically occurs when the given file path matches a decompression
/// command, but is executing in an environment where the decompression
/// command is not available.
///
/// If the given file path could not be matched with a decompression
/// strategy, then a passthru reader is returned that does no
/// decompression.
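    ///
    /// # Example
    ///
    /// A sketch of reusing one builder across several (hypothetical) file
    /// paths, which amortizes the cost of compiling the glob matcher:
    ///
    /// ```no_run
    /// use std::io::Read;
    /// use grep_cli::DecompressionReaderBuilder;
    ///
    /// # fn example() -> Result<(), Box<::std::error::Error>> {
    /// let builder = DecompressionReaderBuilder::new();
    /// for path in &["a.gz", "b.xz"] {
    ///     let mut rdr = builder.build(path)?;
    ///     let mut contents = vec![];
    ///     rdr.read_to_end(&mut contents)?;
    /// }
    /// # Ok(()) }
    /// ```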
pub fn build<P: AsRef<Path>>(
&self,
path: P,
) -> Result<DecompressionReader, CommandError> {
let path = path.as_ref();
let mut cmd = match self.matcher.command(path) {
None => return DecompressionReader::new_passthru(path),
Some(cmd) => cmd,
};
cmd.arg(path);
match self.command_builder.build(&mut cmd) {
Ok(cmd_reader) => Ok(DecompressionReader { rdr: Ok(cmd_reader) }),
Err(err) => {
debug!(
"{}: error spawning command '{:?}': {} \
(falling back to uncompressed reader)",
path.display(),
cmd,
err,
);
DecompressionReader::new_passthru(path)
}
}
}
/// Set the matcher to use to look up the decompression command for each
/// file path.
///
/// A set of sensible rules is enabled by default. Setting this will
/// completely replace the current rules.
pub fn matcher(
&mut self,
matcher: DecompressionMatcher,
) -> &mut DecompressionReaderBuilder {
self.matcher = matcher;
self
}
/// Get the underlying matcher currently used by this builder.
pub fn get_matcher(&self) -> &DecompressionMatcher {
&self.matcher
}
/// When enabled, the reader will asynchronously read the contents of the
/// command's stderr output. When disabled, stderr is only read after the
/// stdout stream has been exhausted (or if the process quits with an error
/// code).
///
/// Note that when enabled, this may require launching an additional
/// thread in order to read stderr. This is done so that the process being
/// executed is never blocked from writing to stdout or stderr. If this is
/// disabled, then it is possible for the process to fill up the stderr
/// buffer and deadlock.
///
/// This is enabled by default.
pub fn async_stderr(
&mut self,
yes: bool,
) -> &mut DecompressionReaderBuilder {
self.command_builder.async_stderr(yes);
self
}
}
/// A streaming reader for decompressing the contents of a file.
///
/// The purpose of this reader is to provide a seamless way to decompress the
/// contents of a file using existing tools in the current environment. This is
/// meant to be an alternative to using decompression libraries in favor of the
/// simplicity and portability of using external commands such as `gzip` and
/// `xz`. This does impose the overhead of spawning a process, so other means
/// for performing decompression should be sought if this overhead isn't
/// acceptable.
///
/// A decompression reader comes with a default set of matching rules that are
/// meant to associate file paths with the corresponding command to use to
/// decompress them. For example, a glob like `*.gz` matches gzip compressed
/// files with the command `gzip -d -c`. If a file path does not match any
/// existing rules, or if it matches a rule whose command does not exist in the
/// current environment, then the decompression reader passes through the
/// contents of the underlying file without doing any decompression.
///
/// The default matching rules are probably good enough for most cases, and if
/// they require revision, pull requests are welcome. In cases where they must
/// be changed or extended, they can be customized through the use of
/// [`DecompressionMatcherBuilder`](struct.DecompressionMatcherBuilder.html)
/// and
/// [`DecompressionReaderBuilder`](struct.DecompressionReaderBuilder.html).
///
/// By default, this reader will asynchronously read the processes' stderr.
/// This prevents subtle deadlocking bugs for noisy processes that write a lot
/// to stderr. Currently, the entire contents of stderr is read onto the heap.
///
/// # Example
///
/// This example shows how to read the decompressed contents of a file without
/// needing to explicitly choose the decompression command to run.
///
/// Note that if you need to decompress multiple files, it is better to use
/// `DecompressionReaderBuilder`, which will amortize the cost of compiling the
/// matcher.
///
/// ```no_run
/// use std::io::Read;
/// use std::process::Command;
/// use grep_cli::DecompressionReader;
///
/// # fn example() -> Result<(), Box<::std::error::Error>> {
/// let mut rdr = DecompressionReader::new("/usr/share/man/man1/ls.1.gz")?;
/// let mut contents = vec![];
/// rdr.read_to_end(&mut contents)?;
/// # Ok(()) }
/// ```
#[derive(Debug)]
pub struct DecompressionReader {
rdr: Result<CommandReader, File>,
}
impl DecompressionReader {
/// Build a new streaming reader for decompressing data.
///
/// If decompression is done out-of-process and if there was a problem
/// spawning the process, then its error is returned.
///
/// If the given file path could not be matched with a decompression
/// strategy, then a passthru reader is returned that does no
/// decompression.
///
/// This uses the default matching rules for determining how to decompress
/// the given file. To change those matching rules, use
/// [`DecompressionReaderBuilder`](struct.DecompressionReaderBuilder.html)
/// and
/// [`DecompressionMatcherBuilder`](struct.DecompressionMatcherBuilder.html).
///
/// When creating readers for many paths, it is better to use the builder
/// since it will amortize the cost of constructing the matcher.
pub fn new<P: AsRef<Path>>(
path: P,
) -> Result<DecompressionReader, CommandError> {
DecompressionReaderBuilder::new().build(path)
}
/// Creates a new "passthru" decompression reader that reads from the file
/// corresponding to the given path without doing decompression and without
/// executing another process.
fn new_passthru(path: &Path) -> Result<DecompressionReader, CommandError> {
let file = File::open(path)?;
Ok(DecompressionReader { rdr: Err(file) })
}
}
impl io::Read for DecompressionReader {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match self.rdr {
Ok(ref mut rdr) => rdr.read(buf),
Err(ref mut rdr) => rdr.read(buf),
}
}
}
fn default_decompression_commands() -> Vec<DecompressionCommand> {
const ARGS_GZIP: &[&str] = &["gzip", "-d", "-c"];
const ARGS_BZIP: &[&str] = &["bzip2", "-d", "-c"];
const ARGS_XZ: &[&str] = &["xz", "-d", "-c"];
const ARGS_LZ4: &[&str] = &["lz4", "-d", "-c"];
const ARGS_LZMA: &[&str] = &["xz", "--format=lzma", "-d", "-c"];
const ARGS_BROTLI: &[&str] = &["brotli", "-d", "-c"];
const ARGS_ZSTD: &[&str] = &["zstd", "-q", "-d", "-c"];
fn cmd(glob: &str, args: &[&str]) -> DecompressionCommand {
DecompressionCommand {
glob: glob.to_string(),
bin: OsStr::new(&args[0]).to_os_string(),
args: args
.iter()
.skip(1)
.map(|s| OsStr::new(s).to_os_string())
.collect(),
}
}
vec![
cmd("*.gz", ARGS_GZIP),
cmd("*.tgz", ARGS_GZIP),
cmd("*.bz2", ARGS_BZIP),
cmd("*.tbz2", ARGS_BZIP),
cmd("*.xz", ARGS_XZ),
cmd("*.txz", ARGS_XZ),
cmd("*.lz4", ARGS_LZ4),
cmd("*.lzma", ARGS_LZMA),
cmd("*.br", ARGS_BROTLI),
cmd("*.zst", ARGS_ZSTD),
cmd("*.zstd", ARGS_ZSTD),
]
}

crates/cli/src/escape.rs Normal file

@@ -0,0 +1,272 @@
use std::ffi::OsStr;
use std::str;
use bstr::{ByteSlice, ByteVec};
/// A single state in the state machine used by `unescape`.
#[derive(Clone, Copy, Eq, PartialEq)]
enum State {
/// The state after seeing a `\`.
Escape,
/// The state after seeing a `\x`.
HexFirst,
/// The state after seeing a `\x[0-9A-Fa-f]`.
HexSecond(char),
/// Default state.
Literal,
}
/// Escapes arbitrary bytes into a human readable string.
///
/// This converts `\t`, `\r` and `\n` into their escaped forms. It also
/// converts the non-printable subset of ASCII in addition to invalid UTF-8
/// bytes to hexadecimal escape sequences. Everything else is left as is.
///
/// The dual of this routine is [`unescape`](fn.unescape.html).
///
/// # Example
///
/// This example shows how to convert a byte string that contains a `\n` and
/// invalid UTF-8 bytes into a `String`.
///
/// Pay special attention to the use of raw strings. That is, `r"\n"` is
/// equivalent to `"\\n"`.
///
/// ```
/// use grep_cli::escape;
///
/// assert_eq!(r"foo\nbar\xFFbaz", escape(b"foo\nbar\xFFbaz"));
/// ```
pub fn escape(bytes: &[u8]) -> String {
let mut escaped = String::new();
for (s, e, ch) in bytes.char_indices() {
if ch == '\u{FFFD}' {
for b in bytes[s..e].bytes() {
escape_byte(b, &mut escaped);
}
} else {
escape_char(ch, &mut escaped);
}
}
escaped
}
/// Escapes an OS string into a human readable string.
///
/// This is like [`escape`](fn.escape.html), but accepts an OS string.
pub fn escape_os(string: &OsStr) -> String {
escape(Vec::from_os_str_lossy(string).as_bytes())
}
/// Unescapes a string.
///
/// It supports a limited set of escape sequences:
///
/// * `\t`, `\r` and `\n` are mapped to their corresponding ASCII bytes.
/// * `\xZZ` hexadecimal escapes are mapped to their byte.
///
/// Everything else is left as is, including non-hexadecimal escapes like
/// `\xGG`.
///
/// This is useful when it is desirable for a command line argument to be
/// capable of specifying arbitrary bytes or otherwise make it easier to
/// specify non-printable characters.
///
/// The dual of this routine is [`escape`](fn.escape.html).
///
/// # Example
///
/// This example shows how to convert an escaped string (which is valid UTF-8)
/// into a corresponding sequence of bytes. Each escape sequence is mapped to
/// its bytes, which may include invalid UTF-8.
///
/// Pay special attention to the use of raw strings. That is, `r"\n"` is
/// equivalent to `"\\n"`.
///
/// ```
/// use grep_cli::unescape;
///
/// assert_eq!(&b"foo\nbar\xFFbaz"[..], &*unescape(r"foo\nbar\xFFbaz"));
/// ```
pub fn unescape(s: &str) -> Vec<u8> {
use self::State::*;
let mut bytes = vec![];
let mut state = Literal;
for c in s.chars() {
match state {
Escape => match c {
'\\' => {
bytes.push(b'\\');
state = Literal;
}
'n' => {
bytes.push(b'\n');
state = Literal;
}
'r' => {
bytes.push(b'\r');
state = Literal;
}
't' => {
bytes.push(b'\t');
state = Literal;
}
'x' => {
state = HexFirst;
}
c => {
bytes.extend(format!(r"\{}", c).into_bytes());
state = Literal;
}
},
HexFirst => match c {
'0'..='9' | 'A'..='F' | 'a'..='f' => {
state = HexSecond(c);
}
c => {
bytes.extend(format!(r"\x{}", c).into_bytes());
state = Literal;
}
},
HexSecond(first) => match c {
'0'..='9' | 'A'..='F' | 'a'..='f' => {
let ordinal = format!("{}{}", first, c);
let byte = u8::from_str_radix(&ordinal, 16).unwrap();
bytes.push(byte);
state = Literal;
}
c => {
let original = format!(r"\x{}{}", first, c);
bytes.extend(original.into_bytes());
state = Literal;
}
},
Literal => match c {
'\\' => {
state = Escape;
}
c => {
bytes.extend(c.to_string().as_bytes());
}
},
}
}
match state {
Escape => bytes.push(b'\\'),
HexFirst => bytes.extend(b"\\x"),
HexSecond(c) => bytes.extend(format!("\\x{}", c).into_bytes()),
Literal => {}
}
bytes
}
/// Unescapes an OS string.
///
/// This is like [`unescape`](fn.unescape.html), but accepts an OS string.
///
/// Note that this first lossily decodes the given OS string as UTF-8. That
/// is, an escaped string (the thing given) should be valid UTF-8.
pub fn unescape_os(string: &OsStr) -> Vec<u8> {
unescape(&string.to_string_lossy())
}
/// Adds the given codepoint to the given string, escaping it if necessary.
fn escape_char(cp: char, into: &mut String) {
if cp.is_ascii() {
escape_byte(cp as u8, into);
} else {
into.push(cp);
}
}
/// Adds the given byte to the given string, escaping it if necessary.
fn escape_byte(byte: u8, into: &mut String) {
match byte {
0x21..=0x5B | 0x5D..=0x7D => into.push(byte as char),
b'\n' => into.push_str(r"\n"),
b'\r' => into.push_str(r"\r"),
b'\t' => into.push_str(r"\t"),
b'\\' => into.push_str(r"\\"),
_ => into.push_str(&format!(r"\x{:02X}", byte)),
}
}
#[cfg(test)]
mod tests {
use super::{escape, unescape};
fn b(bytes: &'static [u8]) -> Vec<u8> {
bytes.to_vec()
}
#[test]
fn empty() {
assert_eq!(b(b""), unescape(r""));
assert_eq!(r"", escape(b""));
}
#[test]
fn backslash() {
assert_eq!(b(b"\\"), unescape(r"\\"));
assert_eq!(r"\\", escape(b"\\"));
}
#[test]
fn nul() {
assert_eq!(b(b"\x00"), unescape(r"\x00"));
assert_eq!(r"\x00", escape(b"\x00"));
}
#[test]
fn nl() {
assert_eq!(b(b"\n"), unescape(r"\n"));
assert_eq!(r"\n", escape(b"\n"));
}
#[test]
fn tab() {
assert_eq!(b(b"\t"), unescape(r"\t"));
assert_eq!(r"\t", escape(b"\t"));
}
#[test]
fn carriage() {
assert_eq!(b(b"\r"), unescape(r"\r"));
assert_eq!(r"\r", escape(b"\r"));
}
#[test]
fn nothing_simple() {
assert_eq!(b(b"\\a"), unescape(r"\a"));
assert_eq!(b(b"\\a"), unescape(r"\\a"));
assert_eq!(r"\\a", escape(b"\\a"));
}
#[test]
fn nothing_hex0() {
assert_eq!(b(b"\\x"), unescape(r"\x"));
assert_eq!(b(b"\\x"), unescape(r"\\x"));
assert_eq!(r"\\x", escape(b"\\x"));
}
#[test]
fn nothing_hex1() {
assert_eq!(b(b"\\xz"), unescape(r"\xz"));
assert_eq!(b(b"\\xz"), unescape(r"\\xz"));
assert_eq!(r"\\xz", escape(b"\\xz"));
}
#[test]
fn nothing_hex2() {
assert_eq!(b(b"\\xzz"), unescape(r"\xzz"));
assert_eq!(b(b"\\xzz"), unescape(r"\\xzz"));
assert_eq!(r"\\xzz", escape(b"\\xzz"));
}
#[test]
fn invalid_utf8() {
assert_eq!(r"\xFF", escape(b"\xFF"));
assert_eq!(r"a\xFFb", escape(b"a\xFFb"));
}
}

crates/cli/src/human.rs Normal file

@@ -0,0 +1,165 @@
use std::error;
use std::fmt;
use std::io;
use std::num::ParseIntError;
use regex::Regex;
/// An error that occurs when parsing a human readable size description.
///
/// This error provides an end user friendly message describing why the
/// description couldn't be parsed and what the expected format is.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ParseSizeError {
original: String,
kind: ParseSizeErrorKind,
}
#[derive(Clone, Debug, Eq, PartialEq)]
enum ParseSizeErrorKind {
InvalidFormat,
InvalidInt(ParseIntError),
Overflow,
}
impl ParseSizeError {
fn format(original: &str) -> ParseSizeError {
ParseSizeError {
original: original.to_string(),
kind: ParseSizeErrorKind::InvalidFormat,
}
}
fn int(original: &str, err: ParseIntError) -> ParseSizeError {
ParseSizeError {
original: original.to_string(),
kind: ParseSizeErrorKind::InvalidInt(err),
}
}
fn overflow(original: &str) -> ParseSizeError {
ParseSizeError {
original: original.to_string(),
kind: ParseSizeErrorKind::Overflow,
}
}
}
impl error::Error for ParseSizeError {
fn description(&self) -> &str {
"invalid size"
}
}
impl fmt::Display for ParseSizeError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::ParseSizeErrorKind::*;
match self.kind {
InvalidFormat => write!(
f,
"invalid format for size '{}', which should be a sequence \
of digits followed by an optional 'K', 'M' or 'G' \
suffix",
self.original
),
InvalidInt(ref err) => write!(
f,
"invalid integer found in size '{}': {}",
self.original, err
),
Overflow => write!(f, "size too big in '{}'", self.original),
}
}
}
impl From<ParseSizeError> for io::Error {
fn from(size_err: ParseSizeError) -> io::Error {
io::Error::new(io::ErrorKind::Other, size_err)
}
}
/// Parse a human readable size like `2M` into a corresponding number of bytes.
///
/// Supported size suffixes are `K` (for kilobyte), `M` (for megabyte) and `G`
/// (for gigabyte). If a size suffix is missing, then the size is interpreted
/// as bytes. If the size is too big to fit into a `u64`, then this returns an
/// error.
///
/// Additional suffixes may be added over time.
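///
/// # Example
///
/// A short example mirroring the tests below:
///
/// ```
/// use grep_cli::parse_human_readable_size;
///
/// assert_eq!(123 * (1 << 20), parse_human_readable_size("123M").unwrap());
/// assert!(parse_human_readable_size("123T").is_err());
/// ```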
pub fn parse_human_readable_size(size: &str) -> Result<u64, ParseSizeError> {
lazy_static! {
// Normally I'd just parse something this simple by hand to avoid the
        // regex dep, but we bring regex in anyway for glob matching, so might
// as well use it.
static ref RE: Regex = Regex::new(r"^([0-9]+)([KMG])?$").unwrap();
}
let caps = match RE.captures(size) {
Some(caps) => caps,
None => return Err(ParseSizeError::format(size)),
};
let value: u64 =
caps[1].parse().map_err(|err| ParseSizeError::int(size, err))?;
let suffix = match caps.get(2) {
None => return Ok(value),
Some(cap) => cap.as_str(),
};
let bytes = match suffix {
"K" => value.checked_mul(1 << 10),
"M" => value.checked_mul(1 << 20),
"G" => value.checked_mul(1 << 30),
// Because if the regex matches this group, it must be [KMG].
_ => unreachable!(),
};
bytes.ok_or_else(|| ParseSizeError::overflow(size))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn suffix_none() {
let x = parse_human_readable_size("123").unwrap();
assert_eq!(123, x);
}
#[test]
fn suffix_k() {
let x = parse_human_readable_size("123K").unwrap();
assert_eq!(123 * (1 << 10), x);
}
#[test]
fn suffix_m() {
let x = parse_human_readable_size("123M").unwrap();
assert_eq!(123 * (1 << 20), x);
}
#[test]
fn suffix_g() {
let x = parse_human_readable_size("123G").unwrap();
assert_eq!(123 * (1 << 30), x);
}
#[test]
fn invalid_empty() {
assert!(parse_human_readable_size("").is_err());
}
#[test]
fn invalid_non_digit() {
assert!(parse_human_readable_size("a").is_err());
}
#[test]
fn invalid_overflow() {
assert!(parse_human_readable_size("9999999999999999G").is_err());
}
#[test]
fn invalid_suffix() {
assert!(parse_human_readable_size("123T").is_err());
}
}

crates/cli/src/lib.rs Normal file

@@ -0,0 +1,250 @@
/*!
This crate provides common routines used in command line applications, with a
focus on routines useful for search oriented applications. As a utility
library, there is no central type or function. However, a key focus of this
crate is to improve failure modes and provide user friendly error messages
when things go wrong.
To the best extent possible, everything in this crate works on Windows, macOS
and Linux.
# Standard I/O
The
[`is_readable_stdin`](fn.is_readable_stdin.html),
[`is_tty_stderr`](fn.is_tty_stderr.html),
[`is_tty_stdin`](fn.is_tty_stdin.html)
and
[`is_tty_stdout`](fn.is_tty_stdout.html)
routines query aspects of standard I/O. `is_readable_stdin` determines whether
stdin can be usefully read from, while the `tty` methods determine whether a
tty is attached to stdin/stdout/stderr.
`is_readable_stdin` is useful when writing an application that changes behavior
based on whether the application was invoked with data on stdin. For example,
`rg foo` might recursively search the current working directory for
occurrences of `foo`, but `rg foo < file` might only search the contents of
`file`.
The `tty` methods are useful for similar reasons. Namely, commands like `ls`
will change their output depending on whether they are printing to a terminal
or not. For example, `ls` shows a file on each line when stdout is redirected
to a file or a pipe, but condenses the output to show possibly many files on
each line when stdout is connected to a tty.
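For example, here is a sketch of a program that changes behavior based on
whether there is anything to read on stdin:

```no_run
use grep_cli::is_readable_stdin;

fn main() {
    if is_readable_stdin() {
        // Search the contents of stdin.
    } else {
        // Search the current working directory.
    }
}
```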
# Coloring and buffering
The
[`stdout`](fn.stdout.html),
[`stdout_buffered_block`](fn.stdout_buffered_block.html)
and
[`stdout_buffered_line`](fn.stdout_buffered_line.html)
routines are alternative constructors for
[`StandardStream`](struct.StandardStream.html).
A `StandardStream` implements `termcolor::WriteColor`, which provides a way
to emit colors to terminals. Its key use is the encapsulation of buffering
style. Namely, `stdout` will return a line buffered `StandardStream` if and
only if stdout is connected to a tty, and will otherwise return a block
buffered `StandardStream`. Line buffering is important for use with a tty
because it typically decreases the latency at which the end user sees output.
Block buffering is used otherwise because it is faster, and redirecting stdout
to a file typically doesn't benefit from the decreased latency that line
buffering provides.
The `stdout_buffered_block` and `stdout_buffered_line` routines can be used to
explicitly set the buffering strategy regardless of whether stdout is connected
to a tty or not.
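For example, here is a sketch that writes to stdout and lets the buffering
strategy be chosen automatically (`ColorChoice` comes from the `termcolor`
crate):

```no_run
extern crate termcolor;

use std::io::Write;
use grep_cli::stdout;
use termcolor::ColorChoice;

# fn example() -> Result<(), Box<::std::error::Error>> {
let mut wtr = stdout(ColorChoice::Auto);
writeln!(wtr, "colorable output")?;
# Ok(()) }
```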
# Escaping
The
[`escape`](fn.escape.html),
[`escape_os`](fn.escape_os.html),
[`unescape`](fn.unescape.html)
and
[`unescape_os`](fn.unescape_os.html)
routines provide a user friendly way of dealing with UTF-8 encoded strings that
can express arbitrary bytes. For example, you might want to accept a string
containing arbitrary bytes as a command line argument, but most interactive
shells make such strings difficult to type. Instead, we can ask users to use
escape sequences.
For example, `a\xFFz` is itself a valid UTF-8 string corresponding to the
following bytes:
```ignore
[b'a', b'\\', b'x', b'F', b'F', b'z']
```
However, we can
interpret `\xFF` as an escape sequence with the `unescape`/`unescape_os`
routines, which will yield
```ignore
[b'a', b'\xFF', b'z']
```
instead. For example:
```
use grep_cli::unescape;
// Note the use of a raw string!
assert_eq!(vec![b'a', b'\xFF', b'z'], unescape(r"a\xFFz"));
```
The `escape`/`escape_os` routines provide the reverse transformation, which
makes it easy to show user friendly error messages involving arbitrary bytes.
# Building patterns
Typically, regular expression patterns must be valid UTF-8. However, command
line arguments aren't guaranteed to be valid UTF-8. Unfortunately, the
standard library's UTF-8 conversion functions from `OsStr`s do not provide
good error messages. However, the
[`pattern_from_bytes`](fn.pattern_from_bytes.html)
and
[`pattern_from_os`](fn.pattern_from_os.html)
routines do, including reporting exactly where the first invalid UTF-8 byte is
seen.
Additionally, it can be useful to read patterns from a file while reporting
good error messages that include line numbers. The
[`patterns_from_path`](fn.patterns_from_path.html),
[`patterns_from_reader`](fn.patterns_from_reader.html)
and
[`patterns_from_stdin`](fn.patterns_from_stdin.html)
routines do just that. If any pattern is found that is invalid UTF-8, then the
error includes the file path (if available) along with the line number and the
byte offset at which the first invalid UTF-8 byte was observed.
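For example, `pattern_from_bytes` reports the offset of the first invalid
UTF-8 byte:

```
use grep_cli::pattern_from_bytes;

assert!(pattern_from_bytes(b"foo").is_ok());
let err = pattern_from_bytes(b"abc\xFFxyz").unwrap_err();
assert_eq!(3, err.valid_up_to());
```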
# Read process output
Sometimes a command line application needs to execute other processes and read
its stdout in a streaming fashion. The
[`CommandReader`](struct.CommandReader.html)
provides this functionality with an explicit goal of improving failure modes.
In particular, if the process exits with an error code, then stderr is read
and converted into a normal Rust error to show to end users. This makes the
underlying failure modes explicit and gives more information to end users for
debugging the problem.
As a special case,
[`DecompressionReader`](struct.DecompressionReader.html)
provides a way to decompress arbitrary files by matching their file extensions
up with corresponding decompression programs (such as `gzip` and `xz`). This
is useful as a means of performing simplistic decompression in a portable
manner without binding to specific compression libraries. This does come with
some overhead though, so if you need to decompress lots of small files, this
may not be an appropriate convenience to use.
Each reader has a corresponding builder for additional configuration, such as
whether to read stderr asynchronously in order to avoid deadlock (which is
enabled by default).
# Miscellaneous parsing
The
[`parse_human_readable_size`](fn.parse_human_readable_size.html)
routine parses strings like `2M` and converts them to the corresponding number
of bytes (`2 * 1<<20` in this case). If an invalid size is found, then a good
error message is crafted that typically tells the user how to fix the problem.
*/
#![deny(missing_docs)]
extern crate atty;
extern crate bstr;
extern crate globset;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate regex;
extern crate same_file;
extern crate termcolor;
#[cfg(windows)]
extern crate winapi_util;
mod decompress;
mod escape;
mod human;
mod pattern;
mod process;
mod wtr;
pub use decompress::{
DecompressionMatcher, DecompressionMatcherBuilder, DecompressionReader,
DecompressionReaderBuilder,
};
pub use escape::{escape, escape_os, unescape, unescape_os};
pub use human::{parse_human_readable_size, ParseSizeError};
pub use pattern::{
pattern_from_bytes, pattern_from_os, patterns_from_path,
patterns_from_reader, patterns_from_stdin, InvalidPatternError,
};
pub use process::{CommandError, CommandReader, CommandReaderBuilder};
pub use wtr::{
stdout, stdout_buffered_block, stdout_buffered_line, StandardStream,
};
/// Returns true if and only if stdin is believed to be readable.
///
/// When stdin is readable, command line programs may choose to behave
/// differently than when stdin is not readable. For example, `command foo`
/// might search the current directory for occurrences of `foo` whereas
/// `command foo < some-file` or `cat some-file | command foo` might instead
/// only search stdin for occurrences of `foo`.
pub fn is_readable_stdin() -> bool {
#[cfg(unix)]
fn imp() -> bool {
use same_file::Handle;
use std::os::unix::fs::FileTypeExt;
let ft = match Handle::stdin().and_then(|h| h.as_file().metadata()) {
Err(_) => return false,
Ok(md) => md.file_type(),
};
ft.is_file() || ft.is_fifo()
}
#[cfg(windows)]
fn imp() -> bool {
use winapi_util as winutil;
winutil::file::typ(winutil::HandleRef::stdin())
.map(|t| t.is_disk() || t.is_pipe())
.unwrap_or(false)
}
!is_tty_stdin() && imp()
}
/// Returns true if and only if stdin is believed to be connected to a tty
/// or a console.
pub fn is_tty_stdin() -> bool {
atty::is(atty::Stream::Stdin)
}
/// Returns true if and only if stdout is believed to be connected to a tty
/// or a console.
///
/// This is useful for when you want your command line program to produce
/// different output depending on whether it's printing directly to a user's
/// terminal or whether it's being redirected somewhere else. For example,
/// implementations of `ls` will often show one item per line when stdout is
/// redirected, but will condense its output when printing to a tty.
pub fn is_tty_stdout() -> bool {
atty::is(atty::Stream::Stdout)
}
/// Returns true if and only if stderr is believed to be connected to a tty
/// or a console.
pub fn is_tty_stderr() -> bool {
atty::is(atty::Stream::Stderr)
}

crates/cli/src/pattern.rs Normal file

@@ -0,0 +1,195 @@
use std::error;
use std::ffi::OsStr;
use std::fmt;
use std::fs::File;
use std::io;
use std::path::Path;
use std::str;
use bstr::io::BufReadExt;
use escape::{escape, escape_os};
/// An error that occurs when a pattern could not be converted to valid UTF-8.
///
/// The purpose of this error is to give a more targeted failure mode for
/// patterns written by end users that are not valid UTF-8.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct InvalidPatternError {
original: String,
valid_up_to: usize,
}
impl InvalidPatternError {
/// Returns the index in the given string up to which valid UTF-8 was
/// verified.
pub fn valid_up_to(&self) -> usize {
self.valid_up_to
}
}
impl error::Error for InvalidPatternError {
fn description(&self) -> &str {
"invalid pattern"
}
}
impl fmt::Display for InvalidPatternError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"found invalid UTF-8 in pattern at byte offset {} \
(use hex escape sequences to match arbitrary bytes \
in a pattern, e.g., \\xFF): '{}'",
self.valid_up_to, self.original,
)
}
}
impl From<InvalidPatternError> for io::Error {
fn from(paterr: InvalidPatternError) -> io::Error {
io::Error::new(io::ErrorKind::Other, paterr)
}
}
/// Convert an OS string into a regular expression pattern.
///
/// This conversion fails if the given pattern is not valid UTF-8, in which
/// case, a targeted error with more information about where the invalid UTF-8
/// occurs is given. The error also suggests the use of hex escape sequences,
/// which are supported by many regex engines.
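///
/// # Example
///
/// ```
/// use std::ffi::OsStr;
///
/// use grep_cli::pattern_from_os;
///
/// let pat = pattern_from_os(OsStr::new("foo")).unwrap();
/// assert_eq!("foo", pat);
/// ```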
pub fn pattern_from_os(pattern: &OsStr) -> Result<&str, InvalidPatternError> {
pattern.to_str().ok_or_else(|| {
let valid_up_to = pattern
.to_string_lossy()
.find('\u{FFFD}')
.expect("a Unicode replacement codepoint for invalid UTF-8");
InvalidPatternError {
original: escape_os(pattern),
valid_up_to: valid_up_to,
}
})
}
/// Convert arbitrary bytes into a regular expression pattern.
///
/// This conversion fails if the given pattern is not valid UTF-8, in which
/// case, a targeted error with more information about where the invalid UTF-8
/// occurs is given. The error also suggests the use of hex escape sequences,
/// which are supported by many regex engines.
pub fn pattern_from_bytes(
pattern: &[u8],
) -> Result<&str, InvalidPatternError> {
str::from_utf8(pattern).map_err(|err| InvalidPatternError {
original: escape(pattern),
valid_up_to: err.valid_up_to(),
})
}
/// Read patterns from a file path, one per line.
///
/// If there was a problem reading or if any of the patterns contain invalid
/// UTF-8, then an error is returned. If there was a problem with a specific
/// pattern, then the error message will include the line number and the file
/// path.
pub fn patterns_from_path<P: AsRef<Path>>(path: P) -> io::Result<Vec<String>> {
let path = path.as_ref();
let file = File::open(path).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("{}: {}", path.display(), err),
)
})?;
patterns_from_reader(file).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("{}:{}", path.display(), err),
)
})
}
/// Read patterns from stdin, one per line.
///
/// If there was a problem reading or if any of the patterns contain invalid
/// UTF-8, then an error is returned. If there was a problem with a specific
/// pattern, then the error message will include the line number and the fact
/// that it came from stdin.
pub fn patterns_from_stdin() -> io::Result<Vec<String>> {
let stdin = io::stdin();
let locked = stdin.lock();
patterns_from_reader(locked).map_err(|err| {
io::Error::new(io::ErrorKind::Other, format!("<stdin>:{}", err))
})
}
/// Read patterns from any reader, one per line.
///
/// If there was a problem reading or if any of the patterns contain invalid
/// UTF-8, then an error is returned. If there was a problem with a specific
/// pattern, then the error message will include the line number.
///
/// Note that this routine uses its own internal buffer, so the caller should
/// not provide their own buffered reader if possible.
///
/// # Example
///
/// This shows how to parse patterns, one per line.
///
/// ```
/// use grep_cli::patterns_from_reader;
///
/// # fn example() -> Result<(), Box<::std::error::Error>> {
/// let patterns = "\
/// foo
/// bar\\s+foo
/// [a-z]{3}
/// ";
///
/// assert_eq!(patterns_from_reader(patterns.as_bytes())?, vec![
/// r"foo",
/// r"bar\s+foo",
/// r"[a-z]{3}",
/// ]);
/// # Ok(()) }
/// ```
pub fn patterns_from_reader<R: io::Read>(rdr: R) -> io::Result<Vec<String>> {
let mut patterns = vec![];
let mut line_number = 0;
io::BufReader::new(rdr).for_byte_line(|line| {
line_number += 1;
match pattern_from_bytes(line) {
Ok(pattern) => {
patterns.push(pattern.to_string());
Ok(true)
}
Err(err) => Err(io::Error::new(
io::ErrorKind::Other,
format!("{}: {}", line_number, err),
)),
}
})?;
Ok(patterns)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn bytes() {
let pat = b"abc\xFFxyz";
let err = pattern_from_bytes(pat).unwrap_err();
assert_eq!(3, err.valid_up_to());
}
#[test]
#[cfg(unix)]
fn os() {
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
let pat = OsStr::from_bytes(b"abc\xFFxyz");
let err = pattern_from_os(pat).unwrap_err();
assert_eq!(3, err.valid_up_to());
}
}

crates/cli/src/process.rs Normal file

@@ -0,0 +1,270 @@
use std::error;
use std::fmt;
use std::io::{self, Read};
use std::iter;
use std::process;
use std::thread::{self, JoinHandle};
/// An error that can occur while running a command and reading its output.
///
/// This error can be seamlessly converted to an `io::Error` via a `From`
/// implementation.
#[derive(Debug)]
pub struct CommandError {
kind: CommandErrorKind,
}
#[derive(Debug)]
enum CommandErrorKind {
Io(io::Error),
Stderr(Vec<u8>),
}
impl CommandError {
/// Create an error from an I/O error.
pub(crate) fn io(ioerr: io::Error) -> CommandError {
CommandError { kind: CommandErrorKind::Io(ioerr) }
}
/// Create an error from the contents of stderr (which may be empty).
pub(crate) fn stderr(bytes: Vec<u8>) -> CommandError {
CommandError { kind: CommandErrorKind::Stderr(bytes) }
}
}
impl error::Error for CommandError {
fn description(&self) -> &str {
"command error"
}
}
impl fmt::Display for CommandError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.kind {
CommandErrorKind::Io(ref e) => e.fmt(f),
CommandErrorKind::Stderr(ref bytes) => {
let msg = String::from_utf8_lossy(bytes);
if msg.trim().is_empty() {
write!(f, "<stderr is empty>")
} else {
let div = iter::repeat('-').take(79).collect::<String>();
write!(
f,
"\n{div}\n{msg}\n{div}",
div = div,
msg = msg.trim()
)
}
}
}
}
}
impl From<io::Error> for CommandError {
fn from(ioerr: io::Error) -> CommandError {
CommandError { kind: CommandErrorKind::Io(ioerr) }
}
}
impl From<CommandError> for io::Error {
fn from(cmderr: CommandError) -> io::Error {
match cmderr.kind {
CommandErrorKind::Io(ioerr) => ioerr,
CommandErrorKind::Stderr(_) => {
io::Error::new(io::ErrorKind::Other, cmderr)
}
}
}
}
/// Configures and builds a streaming reader for process output.
#[derive(Clone, Debug, Default)]
pub struct CommandReaderBuilder {
async_stderr: bool,
}
impl CommandReaderBuilder {
/// Create a new builder with the default configuration.
pub fn new() -> CommandReaderBuilder {
CommandReaderBuilder::default()
}
/// Build a new streaming reader for the given command's output.
///
/// The caller should set everything that's required on the given command
/// before building a reader, such as its arguments, environment and
/// current working directory. Settings such as the stdout and stderr (but
/// not stdin) pipes will be overridden so that they can be controlled by
/// the reader.
///
/// If there was a problem spawning the given command, then its error is
/// returned.
pub fn build(
&self,
command: &mut process::Command,
) -> Result<CommandReader, CommandError> {
let mut child = command
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.spawn()?;
let stdout = child.stdout.take().unwrap();
let stderr = if self.async_stderr {
StderrReader::async(child.stderr.take().unwrap())
} else {
StderrReader::sync(child.stderr.take().unwrap())
};
Ok(CommandReader {
child: child,
stdout: stdout,
stderr: stderr,
done: false,
})
}
/// When enabled, the reader will asynchronously read the contents of the
/// command's stderr output. When disabled, stderr is only read after the
/// stdout stream has been exhausted (or if the process quits with an error
/// code).
///
/// Note that when enabled, this may require launching an additional
/// thread in order to read stderr. This is done so that the process being
/// executed is never blocked from writing to stdout or stderr. If this is
/// disabled, then it is possible for the process to fill up the stderr
/// buffer and deadlock.
///
/// This is enabled by default.
pub fn async_stderr(&mut self, yes: bool) -> &mut CommandReaderBuilder {
self.async_stderr = yes;
self
}
}
/// A streaming reader for a command's output.
///
/// The purpose of this reader is to provide an easy way to execute processes
/// whose stdout is read in a streaming way while also making the processes'
/// stderr available when the process fails with an exit code. This makes it
/// possible to execute processes while surfacing the underlying failure mode
/// in the case of an error.
///
/// Moreover, by default, this reader will asynchronously read the processes'
/// stderr. This prevents subtle deadlocking bugs for noisy processes that
/// write a lot to stderr. Currently, the entire contents of stderr is read
/// onto the heap.
///
/// # Example
///
/// This example shows how to invoke `gzip` to decompress the contents of a
/// file. If the `gzip` command reports a failing exit status, then its stderr
/// is returned as an error.
///
/// ```no_run
/// use std::io::Read;
/// use std::process::Command;
/// use grep_cli::CommandReader;
///
/// # fn example() -> Result<(), Box<::std::error::Error>> {
/// let mut cmd = Command::new("gzip");
/// cmd.arg("-d").arg("-c").arg("/usr/share/man/man1/ls.1.gz");
///
/// let mut rdr = CommandReader::new(&mut cmd)?;
/// let mut contents = vec![];
/// rdr.read_to_end(&mut contents)?;
/// # Ok(()) }
/// ```
#[derive(Debug)]
pub struct CommandReader {
child: process::Child,
stdout: process::ChildStdout,
stderr: StderrReader,
done: bool,
}
impl CommandReader {
/// Create a new streaming reader for the given command using the default
/// configuration.
///
/// The caller should set everything that's required on the given command
/// before building a reader, such as its arguments, environment and
/// current working directory. Settings such as the stdout and stderr (but
/// not stdin) pipes will be overridden so that they can be controlled by
/// the reader.
///
/// If there was a problem spawning the given command, then its error is
/// returned.
///
/// If the caller requires additional configuration for the reader
/// returned, then use
/// [`CommandReaderBuilder`](struct.CommandReaderBuilder.html).
pub fn new(
cmd: &mut process::Command,
) -> Result<CommandReader, CommandError> {
CommandReaderBuilder::new().build(cmd)
}
}
impl io::Read for CommandReader {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.done {
return Ok(0);
}
let nread = self.stdout.read(buf)?;
if nread == 0 {
self.done = true;
// Reap the child now that we're done reading. If the command
// failed, report stderr as an error.
if !self.child.wait()?.success() {
return Err(io::Error::from(self.stderr.read_to_end()));
}
}
Ok(nread)
}
}
/// A reader that encapsulates the asynchronous or synchronous reading of
/// stderr.
#[derive(Debug)]
enum StderrReader {
Async(Option<JoinHandle<CommandError>>),
Sync(process::ChildStderr),
}
impl StderrReader {
/// Create a reader for stderr that reads contents asynchronously.
fn async(mut stderr: process::ChildStderr) -> StderrReader {
let handle =
thread::spawn(move || stderr_to_command_error(&mut stderr));
StderrReader::Async(Some(handle))
}
/// Create a reader for stderr that reads contents synchronously.
fn sync(stderr: process::ChildStderr) -> StderrReader {
StderrReader::Sync(stderr)
}
    /// Reads all of stderr onto the heap and returns it as an error.
///
/// If there was a problem reading stderr itself, then this returns an I/O
/// command error.
fn read_to_end(&mut self) -> CommandError {
match *self {
StderrReader::Async(ref mut handle) => {
let handle = handle
.take()
.expect("read_to_end cannot be called more than once");
handle.join().expect("stderr reading thread does not panic")
}
StderrReader::Sync(ref mut stderr) => {
stderr_to_command_error(stderr)
}
}
}
}
fn stderr_to_command_error(stderr: &mut process::ChildStderr) -> CommandError {
let mut bytes = vec![];
match stderr.read_to_end(&mut bytes) {
Ok(_) => CommandError::stderr(bytes),
Err(err) => CommandError::io(err),
}
}

crates/cli/src/wtr.rs Normal file

@@ -0,0 +1,133 @@
use std::io;
use termcolor;
use is_tty_stdout;
/// A writer that supports coloring with either line or block buffering.
pub struct StandardStream(StandardStreamKind);
/// Returns a possibly buffered writer to stdout for the given color choice.
///
/// The writer returned is either line buffered or block buffered. The decision
/// between these two is made automatically based on whether a tty is attached
/// to stdout or not. If a tty is attached, then line buffering is used.
/// Otherwise, block buffering is used. In general, block buffering is more
/// efficient, but may increase the time it takes for the end user to see the
/// first bits of output.
///
/// If you need more fine grained control over the buffering mode, then use one
/// of `stdout_buffered_line` or `stdout_buffered_block`.
///
/// The color choice given is passed along to the underlying writer. To
/// completely disable colors in all cases, use `ColorChoice::Never`.
pub fn stdout(color_choice: termcolor::ColorChoice) -> StandardStream {
if is_tty_stdout() {
stdout_buffered_line(color_choice)
} else {
stdout_buffered_block(color_choice)
}
}
/// Returns a line buffered writer to stdout for the given color choice.
///
/// This writer is useful when printing results directly to a tty such that
/// users see output as soon as it's written. The downside of this approach
/// is that it can be slower, especially when there is a lot of output.
///
/// You might consider using
/// [`stdout`](fn.stdout.html)
/// instead, which chooses the buffering strategy automatically based on
/// whether stdout is connected to a tty.
pub fn stdout_buffered_line(
color_choice: termcolor::ColorChoice,
) -> StandardStream {
let out = termcolor::StandardStream::stdout(color_choice);
StandardStream(StandardStreamKind::LineBuffered(out))
}
/// Returns a block buffered writer to stdout for the given color choice.
///
/// This writer is useful when printing results to a file since it amortizes
/// the cost of writing data. The downside of this approach is that it can
/// increase the latency of display output when writing to a tty.
///
/// You might consider using
/// [`stdout`](fn.stdout.html)
/// instead, which chooses the buffering strategy automatically based on
/// whether stdout is connected to a tty.
pub fn stdout_buffered_block(
color_choice: termcolor::ColorChoice,
) -> StandardStream {
let out = termcolor::BufferedStandardStream::stdout(color_choice);
StandardStream(StandardStreamKind::BlockBuffered(out))
}
enum StandardStreamKind {
LineBuffered(termcolor::StandardStream),
BlockBuffered(termcolor::BufferedStandardStream),
}
impl io::Write for StandardStream {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
use self::StandardStreamKind::*;
match self.0 {
LineBuffered(ref mut w) => w.write(buf),
BlockBuffered(ref mut w) => w.write(buf),
}
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
use self::StandardStreamKind::*;
match self.0 {
LineBuffered(ref mut w) => w.flush(),
BlockBuffered(ref mut w) => w.flush(),
}
}
}
impl termcolor::WriteColor for StandardStream {
#[inline]
fn supports_color(&self) -> bool {
use self::StandardStreamKind::*;
match self.0 {
LineBuffered(ref w) => w.supports_color(),
BlockBuffered(ref w) => w.supports_color(),
}
}
#[inline]
fn set_color(&mut self, spec: &termcolor::ColorSpec) -> io::Result<()> {
use self::StandardStreamKind::*;
match self.0 {
LineBuffered(ref mut w) => w.set_color(spec),
BlockBuffered(ref mut w) => w.set_color(spec),
}
}
#[inline]
fn reset(&mut self) -> io::Result<()> {
use self::StandardStreamKind::*;
match self.0 {
LineBuffered(ref mut w) => w.reset(),
BlockBuffered(ref mut w) => w.reset(),
}
}
#[inline]
fn is_synchronous(&self) -> bool {
use self::StandardStreamKind::*;
match self.0 {
LineBuffered(ref w) => w.is_synchronous(),
BlockBuffered(ref w) => w.is_synchronous(),
}
}
}

crates/core/app.rs Normal file

File diff suppressed because it is too large (2955 lines)

crates/core/args.rs Normal file

File diff suppressed because it is too large (1805 lines)

crates/core/config.rs Normal file

@@ -0,0 +1,169 @@
// This module provides routines for reading ripgrep config "rc" files. The
// primary output of these routines is a sequence of arguments, where each
// argument corresponds precisely to one shell argument.
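//
// For example, a config file contains one shell argument per line, and a
// line starting with '#' is a comment (this sketch reuses the flags from
// the tests below):
//
//     # Never print context lines.
//     --context=0
//     --smart-case
//     -u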
use std::env;
use std::error::Error;
use std::ffi::OsString;
use std::fs::File;
use std::io;
use std::path::{Path, PathBuf};
use bstr::{io::BufReadExt, ByteSlice};
use log;
use crate::Result;
/// Return a sequence of arguments derived from ripgrep rc configuration files.
pub fn args() -> Vec<OsString> {
let config_path = match env::var_os("RIPGREP_CONFIG_PATH") {
None => return vec![],
Some(config_path) => {
if config_path.is_empty() {
return vec![];
}
PathBuf::from(config_path)
}
};
let (args, errs) = match parse(&config_path) {
Ok((args, errs)) => (args, errs),
Err(err) => {
message!("{}", err);
return vec![];
}
};
if !errs.is_empty() {
for err in errs {
message!("{}:{}", config_path.display(), err);
}
}
log::debug!(
"{}: arguments loaded from config file: {:?}",
config_path.display(),
args
);
args
}
/// Parse a single ripgrep rc file from the given path.
///
/// On success, this returns a set of shell arguments, in order, that should
/// be pre-pended to the arguments given to ripgrep at the command line.
///
/// If the file could not be read, then an error is returned. If there was
/// a problem parsing one or more lines in the file, then errors are returned
/// for each line in addition to successfully parsed arguments.
fn parse<P: AsRef<Path>>(
path: P,
) -> Result<(Vec<OsString>, Vec<Box<dyn Error>>)> {
let path = path.as_ref();
match File::open(&path) {
Ok(file) => parse_reader(file),
Err(err) => Err(From::from(format!("{}: {}", path.display(), err))),
}
}
/// Parse a single ripgrep rc file from the given reader.
///
/// Callers should not provide a buffered reader, as this routine will use its
/// own buffer internally.
///
/// On success, this returns a set of shell arguments, in order, that should
/// be pre-pended to the arguments given to ripgrep at the command line.
///
/// If the reader could not be read, then an error is returned. If there was a
/// problem parsing one or more lines, then errors are returned for each line
/// in addition to successfully parsed arguments.
fn parse_reader<R: io::Read>(
rdr: R,
) -> Result<(Vec<OsString>, Vec<Box<dyn Error>>)> {
let bufrdr = io::BufReader::new(rdr);
let (mut args, mut errs) = (vec![], vec![]);
let mut line_number = 0;
bufrdr.for_byte_line_with_terminator(|line| {
line_number += 1;
let line = line.trim();
if line.is_empty() || line[0] == b'#' {
return Ok(true);
}
match line.to_os_str() {
Ok(osstr) => {
args.push(osstr.to_os_string());
}
Err(err) => {
errs.push(format!("{}: {}", line_number, err).into());
}
}
Ok(true)
})?;
Ok((args, errs))
}
#[cfg(test)]
mod tests {
use super::parse_reader;
use std::ffi::OsString;
#[test]
fn basic() {
let (args, errs) = parse_reader(
&b"\
# Test
--context=0
--smart-case
-u
# --bar
--foo
"[..],
)
.unwrap();
assert!(errs.is_empty());
let args: Vec<String> =
args.into_iter().map(|s| s.into_string().unwrap()).collect();
assert_eq!(args, vec!["--context=0", "--smart-case", "-u", "--foo",]);
}
// We test that we can handle invalid UTF-8 on Unix-like systems.
#[test]
#[cfg(unix)]
fn error() {
use std::os::unix::ffi::OsStringExt;
let (args, errs) = parse_reader(
&b"\
quux
foo\xFFbar
baz
"[..],
)
.unwrap();
assert!(errs.is_empty());
assert_eq!(
args,
vec![
OsString::from("quux"),
OsString::from_vec(b"foo\xFFbar".to_vec()),
OsString::from("baz"),
]
);
}
// ... but test that invalid UTF-8 fails on Windows.
#[test]
#[cfg(not(unix))]
fn error() {
let (args, errs) = parse_reader(
&b"\
quux
foo\xFFbar
baz
"[..],
)
.unwrap();
assert_eq!(errs.len(), 1);
assert_eq!(args, vec![OsString::from("quux"), OsString::from("baz"),]);
}
}

68
crates/core/logger.rs Normal file

@@ -0,0 +1,68 @@
// This module defines a super simple logger that works with the `log` crate.
// We don't need anything fancy; just basic log levels and the ability to
// print to stderr. We therefore avoid bringing in extra dependencies just
// for this functionality.
use log::{self, Log};
/// The simplest possible logger that logs to stderr.
///
/// This logger does no filtering. Instead, it relies on the `log` crate's
/// filtering via its global max_level setting.
#[derive(Debug)]
pub struct Logger(());
const LOGGER: &'static Logger = &Logger(());
impl Logger {
/// Create a new logger that logs to stderr and initialize it as the
/// global logger. If there was a problem setting the logger, then an
/// error is returned.
pub fn init() -> Result<(), log::SetLoggerError> {
log::set_logger(LOGGER)
}
}
impl Log for Logger {
fn enabled(&self, _: &log::Metadata) -> bool {
// We set the log level via log::set_max_level, so we don't need to
// implement filtering here.
true
}
fn log(&self, record: &log::Record) {
match (record.file(), record.line()) {
(Some(file), Some(line)) => {
eprintln!(
"{}|{}|{}:{}: {}",
record.level(),
record.target(),
file,
line,
record.args()
);
}
(Some(file), None) => {
eprintln!(
"{}|{}|{}: {}",
record.level(),
record.target(),
file,
record.args()
);
}
_ => {
eprintln!(
"{}|{}: {}",
record.level(),
record.target(),
record.args()
);
}
}
}
fn flush(&self) {
// We use eprintln! which is flushed on every call.
}
}
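
A minimal initialization sketch (assuming the caller also sets a max level, since this logger does no filtering of its own and the `log` crate's runtime level defaults to `Off`):

```rust
fn enable_logging() -> Result<(), log::SetLoggerError> {
    Logger::init()?;
    // Nothing is emitted until a max level is chosen.
    log::set_max_level(log::LevelFilter::Debug);
    log::debug!("logging enabled");
    Ok(())
}
```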

325
crates/core/main.rs Normal file

@@ -0,0 +1,325 @@
use std::error;
use std::io::{self, Write};
use std::process;
use std::sync::Mutex;
use std::time::Instant;
use ignore::WalkState;
use args::Args;
use subject::Subject;
#[macro_use]
mod messages;
mod app;
mod args;
mod config;
mod logger;
mod path_printer;
mod search;
mod subject;
// Since Rust no longer uses jemalloc by default, ripgrep will, by default,
// use the system allocator. On Linux, this would normally be glibc's
// allocator, which is pretty good. In particular, ripgrep does not have a
// particularly allocation heavy workload, so there really isn't much
// difference (for ripgrep's purposes) between glibc's allocator and jemalloc.
//
// However, when ripgrep is built with musl, this means ripgrep will use musl's
// allocator, which appears to be substantially worse. (musl's goal is not to
// have the fastest version of everything. Its goal is to be small and amenable
// to static compilation.) Even though ripgrep isn't particularly allocation
// heavy, musl's allocator appears to slow down ripgrep quite a bit. Therefore,
// when building with musl, we use jemalloc.
//
// We don't unconditionally use jemalloc because it can be nice to use the
// system's default allocator by default. Moreover, jemalloc seems to increase
// compilation times by a bit.
//
// Finally, we only do this on 64-bit systems since jemalloc doesn't support
// i686.
#[cfg(all(target_env = "musl", target_pointer_width = "64"))]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
type Result<T> = ::std::result::Result<T, Box<dyn error::Error>>;
fn main() {
if let Err(err) = Args::parse().and_then(try_main) {
eprintln!("{}", err);
process::exit(2);
}
}
fn try_main(args: Args) -> Result<()> {
use args::Command::*;
let matched = match args.command()? {
Search => search(&args),
SearchParallel => search_parallel(&args),
SearchNever => Ok(false),
Files => files(&args),
FilesParallel => files_parallel(&args),
Types => types(&args),
PCRE2Version => pcre2_version(&args),
}?;
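    // Exit status protocol: 0 when a match was found and no non-fatal error
    // occurred (--quiet ignores such errors), 2 when a non-fatal error was
    // reported, and 1 otherwise.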
if matched && (args.quiet() || !messages::errored()) {
process::exit(0)
} else if messages::errored() {
process::exit(2)
} else {
process::exit(1)
}
}
/// The top-level entry point for single-threaded search. This recursively
/// steps through the file list (current directory by default) and searches
/// each file sequentially.
fn search(args: &Args) -> Result<bool> {
let started_at = Instant::now();
let quit_after_match = args.quit_after_match()?;
let subject_builder = args.subject_builder();
let mut stats = args.stats()?;
let mut searcher = args.search_worker(args.stdout())?;
let mut matched = false;
for result in args.walker()? {
let subject = match subject_builder.build_from_result(result) {
Some(subject) => subject,
None => continue,
};
let search_result = match searcher.search(&subject) {
Ok(search_result) => search_result,
Err(err) => {
// A broken pipe means graceful termination.
if err.kind() == io::ErrorKind::BrokenPipe {
break;
}
err_message!("{}: {}", subject.path().display(), err);
continue;
}
};
matched = matched || search_result.has_match();
if let Some(ref mut stats) = stats {
*stats += search_result.stats().unwrap();
}
if matched && quit_after_match {
break;
}
}
if let Some(ref stats) = stats {
let elapsed = Instant::now().duration_since(started_at);
// We don't care if we couldn't print this successfully.
let _ = searcher.print_stats(elapsed, stats);
}
Ok(matched)
}
/// The top-level entry point for multi-threaded search. The parallelism is
/// itself achieved by the recursive directory traversal. All we need to do is
/// feed it a worker for performing a search on each file.
fn search_parallel(args: &Args) -> Result<bool> {
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering::SeqCst;
let quit_after_match = args.quit_after_match()?;
let started_at = Instant::now();
let subject_builder = args.subject_builder();
let bufwtr = args.buffer_writer()?;
let stats = args.stats()?.map(Mutex::new);
let matched = AtomicBool::new(false);
let mut searcher_err = None;
args.walker_parallel()?.run(|| {
let bufwtr = &bufwtr;
let stats = &stats;
let matched = &matched;
let subject_builder = &subject_builder;
let mut searcher = match args.search_worker(bufwtr.buffer()) {
Ok(searcher) => searcher,
Err(err) => {
searcher_err = Some(err);
return Box::new(move |_| WalkState::Quit);
}
};
Box::new(move |result| {
let subject = match subject_builder.build_from_result(result) {
Some(subject) => subject,
None => return WalkState::Continue,
};
searcher.printer().get_mut().clear();
let search_result = match searcher.search(&subject) {
Ok(search_result) => search_result,
Err(err) => {
err_message!("{}: {}", subject.path().display(), err);
return WalkState::Continue;
}
};
if search_result.has_match() {
matched.store(true, SeqCst);
}
if let Some(ref locked_stats) = *stats {
let mut stats = locked_stats.lock().unwrap();
*stats += search_result.stats().unwrap();
}
if let Err(err) = bufwtr.print(searcher.printer().get_mut()) {
// A broken pipe means graceful termination.
if err.kind() == io::ErrorKind::BrokenPipe {
return WalkState::Quit;
}
// Otherwise, we continue on our merry way.
err_message!("{}: {}", subject.path().display(), err);
}
if matched.load(SeqCst) && quit_after_match {
WalkState::Quit
} else {
WalkState::Continue
}
})
});
if let Some(err) = searcher_err.take() {
return Err(err);
}
if let Some(ref locked_stats) = stats {
let elapsed = Instant::now().duration_since(started_at);
let stats = locked_stats.lock().unwrap();
let mut searcher = args.search_worker(args.stdout())?;
// We don't care if we couldn't print this successfully.
let _ = searcher.print_stats(elapsed, &stats);
}
Ok(matched.load(SeqCst))
}
/// The top-level entry point for listing files without searching them. This
/// recursively steps through the file list (current directory by default) and
/// prints each path sequentially using a single thread.
fn files(args: &Args) -> Result<bool> {
let quit_after_match = args.quit_after_match()?;
let subject_builder = args.subject_builder();
let mut matched = false;
let mut path_printer = args.path_printer(args.stdout())?;
for result in args.walker()? {
let subject = match subject_builder.build_from_result(result) {
Some(subject) => subject,
None => continue,
};
matched = true;
if quit_after_match {
break;
}
if let Err(err) = path_printer.write_path(subject.path()) {
// A broken pipe means graceful termination.
if err.kind() == io::ErrorKind::BrokenPipe {
break;
}
// Otherwise, we have some other error that's preventing us from
// writing to stdout, so we should bubble it up.
return Err(err.into());
}
}
Ok(matched)
}
/// The top-level entry point for listing files without searching them. This
/// recursively steps through the file list (current directory by default)
/// in parallel, while printing each path sequentially from a single
/// dedicated writer thread.
fn files_parallel(args: &Args) -> Result<bool> {
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::mpsc;
use std::thread;
let quit_after_match = args.quit_after_match()?;
let subject_builder = args.subject_builder();
let mut path_printer = args.path_printer(args.stdout())?;
let matched = AtomicBool::new(false);
let (tx, rx) = mpsc::channel::<Subject>();
let print_thread = thread::spawn(move || -> io::Result<()> {
for subject in rx.iter() {
path_printer.write_path(subject.path())?;
}
Ok(())
});
args.walker_parallel()?.run(|| {
let subject_builder = &subject_builder;
let matched = &matched;
let tx = tx.clone();
Box::new(move |result| {
let subject = match subject_builder.build_from_result(result) {
Some(subject) => subject,
None => return WalkState::Continue,
};
matched.store(true, SeqCst);
if quit_after_match {
WalkState::Quit
} else {
match tx.send(subject) {
Ok(_) => WalkState::Continue,
Err(_) => WalkState::Quit,
}
}
})
});
drop(tx);
if let Err(err) = print_thread.join().unwrap() {
// A broken pipe means graceful termination, so fall through.
// Otherwise, something bad happened while writing to stdout, so bubble
// it up.
if err.kind() != io::ErrorKind::BrokenPipe {
return Err(err.into());
}
}
Ok(matched.load(SeqCst))
}
/// The top-level entry point for --type-list.
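///
/// Each definition is written as one `name: glob1, glob2, ...` line, e.g. a
/// hypothetical `rust: *.rs`.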
fn types(args: &Args) -> Result<bool> {
let mut count = 0;
let mut stdout = args.stdout();
for def in args.type_defs()? {
count += 1;
stdout.write_all(def.name().as_bytes())?;
stdout.write_all(b": ")?;
let mut first = true;
for glob in def.globs() {
if !first {
stdout.write_all(b", ")?;
}
stdout.write_all(glob.as_bytes())?;
first = false;
}
stdout.write_all(b"\n")?;
}
Ok(count > 0)
}
/// The top-level entry point for --pcre2-version.
fn pcre2_version(args: &Args) -> Result<bool> {
#[cfg(feature = "pcre2")]
fn imp(args: &Args) -> Result<bool> {
use grep::pcre2;
let mut stdout = args.stdout();
let (major, minor) = pcre2::version();
writeln!(stdout, "PCRE2 {}.{} is available", major, minor)?;
if cfg!(target_pointer_width = "64") && pcre2::is_jit_available() {
writeln!(stdout, "JIT is available")?;
}
Ok(true)
}
#[cfg(not(feature = "pcre2"))]
fn imp(args: &Args) -> Result<bool> {
let mut stdout = args.stdout();
writeln!(stdout, "PCRE2 is not available in this build of ripgrep.")?;
Ok(false)
}
imp(args)
}

74
crates/core/messages.rs Normal file

@@ -0,0 +1,74 @@
use std::sync::atomic::{AtomicBool, Ordering};
static MESSAGES: AtomicBool = AtomicBool::new(false);
static IGNORE_MESSAGES: AtomicBool = AtomicBool::new(false);
static ERRORED: AtomicBool = AtomicBool::new(false);
/// Emit a non-fatal error message, unless messages were disabled.
#[macro_export]
macro_rules! message {
($($tt:tt)*) => {
if crate::messages::messages() {
eprintln!($($tt)*);
}
}
}
/// Like message, but sets ripgrep's "errored" flag, which controls the exit
/// status.
#[macro_export]
macro_rules! err_message {
($($tt:tt)*) => {
crate::messages::set_errored();
message!($($tt)*);
}
}
/// Emit a non-fatal ignore-related error message (like a parse error), unless
/// ignore-messages were disabled.
#[macro_export]
macro_rules! ignore_message {
($($tt:tt)*) => {
if crate::messages::messages() && crate::messages::ignore_messages() {
eprintln!($($tt)*);
}
}
}
/// Returns true if and only if messages should be shown.
pub fn messages() -> bool {
MESSAGES.load(Ordering::SeqCst)
}
/// Set whether messages should be shown or not.
///
/// By default, they are not shown.
pub fn set_messages(yes: bool) {
MESSAGES.store(yes, Ordering::SeqCst)
}
/// Returns true if and only if "ignore" related messages should be shown.
pub fn ignore_messages() -> bool {
IGNORE_MESSAGES.load(Ordering::SeqCst)
}
/// Set whether "ignore" related messages should be shown or not.
///
/// By default, they are not shown.
///
/// Note that this is overridden if `messages` is disabled. Namely, if
/// `messages` is disabled, then "ignore" messages are never shown, regardless
/// of this setting.
pub fn set_ignore_messages(yes: bool) {
IGNORE_MESSAGES.store(yes, Ordering::SeqCst)
}
/// Returns true if and only if ripgrep came across a non-fatal error.
pub fn errored() -> bool {
ERRORED.load(Ordering::SeqCst)
}
/// Indicate that ripgrep has come across a non-fatal error.
pub fn set_errored() {
ERRORED.store(true, Ordering::SeqCst);
}
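
A small, hypothetical call-site sketch for the macros above (real call sites are spread throughout the crate):

```rust
fn report_unreadable(path: &std::path::Path) {
    // Printed only if messages are enabled; also sets the errored flag,
    // which try_main later turns into exit code 2.
    err_message!("{}: permission denied", path.display());
}
```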

98
crates/core/path_printer.rs Normal file

@@ -0,0 +1,98 @@
use std::io;
use std::path::Path;
use grep::printer::{ColorSpecs, PrinterPath};
use termcolor::WriteColor;
/// A configuration for describing how paths should be written.
#[derive(Clone, Debug)]
struct Config {
colors: ColorSpecs,
separator: Option<u8>,
terminator: u8,
}
impl Default for Config {
fn default() -> Config {
Config {
colors: ColorSpecs::default(),
separator: None,
terminator: b'\n',
}
}
}
/// A builder for constructing a path printer.
#[derive(Clone, Debug)]
pub struct PathPrinterBuilder {
config: Config,
}
impl PathPrinterBuilder {
/// Return a new path printer builder with a default configuration.
pub fn new() -> PathPrinterBuilder {
PathPrinterBuilder { config: Config::default() }
}
/// Create a new path printer with the current configuration that writes
/// paths to the given writer.
pub fn build<W: WriteColor>(&self, wtr: W) -> PathPrinter<W> {
PathPrinter { config: self.config.clone(), wtr: wtr }
}
/// Set the color specification for this printer.
///
/// Currently, only the `path` component of the given specification is
/// used.
pub fn color_specs(
&mut self,
specs: ColorSpecs,
) -> &mut PathPrinterBuilder {
self.config.colors = specs;
self
}
/// A path separator.
///
/// When provided, the path's default separator will be replaced with
/// the given separator.
///
/// This is not set by default; in that case, the system's default path
/// separator is used.
pub fn separator(&mut self, sep: Option<u8>) -> &mut PathPrinterBuilder {
self.config.separator = sep;
self
}
/// A path terminator.
///
/// When printing a path, it will be terminated by the given byte.
///
/// This is set to `\n` by default.
pub fn terminator(&mut self, terminator: u8) -> &mut PathPrinterBuilder {
self.config.terminator = terminator;
self
}
}
/// A printer for emitting paths to a writer, with optional color support.
#[derive(Debug)]
pub struct PathPrinter<W> {
config: Config,
wtr: W,
}
impl<W: WriteColor> PathPrinter<W> {
/// Write the given path to the underlying writer.
pub fn write_path(&mut self, path: &Path) -> io::Result<()> {
let ppath = PrinterPath::with_separator(path, self.config.separator);
if !self.wtr.supports_color() {
self.wtr.write_all(ppath.as_bytes())?;
} else {
self.wtr.set_color(self.config.colors.path())?;
self.wtr.write_all(ppath.as_bytes())?;
self.wtr.reset()?;
}
self.wtr.write_all(&[self.config.terminator])
}
}
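
A hedged usage sketch (the choice of `termcolor` writer here is illustrative):

```rust
use std::path::Path;
use termcolor::{ColorChoice, StandardStream};

fn print_one_path() -> std::io::Result<()> {
    let mut printer = PathPrinterBuilder::new()
        .terminator(b'\0') // e.g., NUL-separated output
        .build(StandardStream::stdout(ColorChoice::Never));
    printer.write_path(Path::new("src/main.rs"))
}
```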

534
crates/core/search.rs Normal file

@@ -0,0 +1,534 @@
use std::fs::File;
use std::io;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::time::Duration;
use grep::cli;
use grep::matcher::Matcher;
#[cfg(feature = "pcre2")]
use grep::pcre2::RegexMatcher as PCRE2RegexMatcher;
use grep::printer::{Standard, Stats, Summary, JSON};
use grep::regex::RegexMatcher as RustRegexMatcher;
use grep::searcher::{BinaryDetection, Searcher};
use ignore::overrides::Override;
use serde_json as json;
use serde_json::json;
use termcolor::WriteColor;
use crate::subject::Subject;
/// The configuration for the search worker. Among a few other things, the
/// configuration primarily controls the way we show search results to users
/// at a very high level.
#[derive(Clone, Debug)]
struct Config {
json_stats: bool,
preprocessor: Option<PathBuf>,
preprocessor_globs: Override,
search_zip: bool,
binary_implicit: BinaryDetection,
binary_explicit: BinaryDetection,
}
impl Default for Config {
fn default() -> Config {
Config {
json_stats: false,
preprocessor: None,
preprocessor_globs: Override::empty(),
search_zip: false,
binary_implicit: BinaryDetection::none(),
binary_explicit: BinaryDetection::none(),
}
}
}
/// A builder for configuring and constructing a search worker.
#[derive(Clone, Debug)]
pub struct SearchWorkerBuilder {
config: Config,
command_builder: cli::CommandReaderBuilder,
decomp_builder: cli::DecompressionReaderBuilder,
}
impl Default for SearchWorkerBuilder {
fn default() -> SearchWorkerBuilder {
SearchWorkerBuilder::new()
}
}
impl SearchWorkerBuilder {
/// Create a new builder for configuring and constructing a search worker.
pub fn new() -> SearchWorkerBuilder {
let mut cmd_builder = cli::CommandReaderBuilder::new();
cmd_builder.async_stderr(true);
let mut decomp_builder = cli::DecompressionReaderBuilder::new();
decomp_builder.async_stderr(true);
SearchWorkerBuilder {
config: Config::default(),
command_builder: cmd_builder,
decomp_builder: decomp_builder,
}
}
/// Create a new search worker using the given searcher, matcher and
/// printer.
pub fn build<W: WriteColor>(
&self,
matcher: PatternMatcher,
searcher: Searcher,
printer: Printer<W>,
) -> SearchWorker<W> {
let config = self.config.clone();
let command_builder = self.command_builder.clone();
let decomp_builder = self.decomp_builder.clone();
SearchWorker {
config,
command_builder,
decomp_builder,
matcher,
searcher,
printer,
}
}
/// Forcefully use JSON to emit statistics, even if the underlying printer
/// is not the JSON printer.
///
/// This is useful for implementing flag combinations like
/// `--json --quiet`, which uses the summary printer for implementing
/// `--quiet` but still wants to emit summary statistics, which should
/// be JSON formatted because of the `--json` flag.
pub fn json_stats(&mut self, yes: bool) -> &mut SearchWorkerBuilder {
self.config.json_stats = yes;
self
}
/// Set the path to a preprocessor command.
///
/// When this is set, instead of searching files directly, the given
/// command will be run with the file path as the first argument, and the
/// output of that command will be searched instead.
pub fn preprocessor(
&mut self,
cmd: Option<PathBuf>,
) -> &mut SearchWorkerBuilder {
self.config.preprocessor = cmd;
self
}
/// Set the globs for determining which files should be run through the
/// preprocessor. By default, with no globs and a preprocessor specified,
/// every file is run through the preprocessor.
pub fn preprocessor_globs(
&mut self,
globs: Override,
) -> &mut SearchWorkerBuilder {
self.config.preprocessor_globs = globs;
self
}
/// Enable the decompression and searching of common compressed files.
///
/// When enabled, if a particular file path is recognized as a compressed
/// file, then it is decompressed before searching.
///
/// Note that if a preprocessor command is set, then it overrides this
/// setting.
pub fn search_zip(&mut self, yes: bool) -> &mut SearchWorkerBuilder {
self.config.search_zip = yes;
self
}
/// Set the binary detection that should be used when searching files
/// found via a recursive directory search.
///
/// Generally, this binary detection may be `BinaryDetection::quit` if
/// we want to skip binary files completely.
///
/// By default, no binary detection is performed.
pub fn binary_detection_implicit(
&mut self,
detection: BinaryDetection,
) -> &mut SearchWorkerBuilder {
self.config.binary_implicit = detection;
self
}
/// Set the binary detection that should be used when searching files
/// explicitly supplied by an end user.
///
/// Generally, this binary detection should NOT be `BinaryDetection::quit`,
/// since we never want to automatically filter files supplied by the end
/// user.
///
/// By default, no binary detection is performed.
pub fn binary_detection_explicit(
&mut self,
detection: BinaryDetection,
) -> &mut SearchWorkerBuilder {
self.config.binary_explicit = detection;
self
}
}
/// The result of executing a search.
///
/// Generally speaking, the "result" of a search is sent to a printer, which
/// writes results to an underlying writer such as stdout or a file. However,
/// every search also has some aggregate statistics or metadata that may be
/// useful to higher level routines.
#[derive(Clone, Debug, Default)]
pub struct SearchResult {
has_match: bool,
stats: Option<Stats>,
}
impl SearchResult {
/// Whether the search found a match or not.
pub fn has_match(&self) -> bool {
self.has_match
}
/// Return aggregate search statistics for a single search, if available.
///
/// It can be expensive to compute statistics, so these are only present
/// if explicitly enabled in the printer provided by the caller.
pub fn stats(&self) -> Option<&Stats> {
self.stats.as_ref()
}
}
/// The pattern matcher used by a search worker.
#[derive(Clone, Debug)]
pub enum PatternMatcher {
RustRegex(RustRegexMatcher),
#[cfg(feature = "pcre2")]
PCRE2(PCRE2RegexMatcher),
}
/// The printer used by a search worker.
///
/// The `W` type parameter refers to the type of the underlying writer.
#[derive(Debug)]
pub enum Printer<W> {
/// Use the standard printer, which supports the classic grep-like format.
Standard(Standard<W>),
/// Use the summary printer, which supports aggregate displays of search
/// results.
Summary(Summary<W>),
/// A JSON printer, which emits results in the JSON Lines format.
JSON(JSON<W>),
}
impl<W: WriteColor> Printer<W> {
fn print_stats(
&mut self,
total_duration: Duration,
stats: &Stats,
) -> io::Result<()> {
match *self {
Printer::JSON(_) => self.print_stats_json(total_duration, stats),
Printer::Standard(_) | Printer::Summary(_) => {
self.print_stats_human(total_duration, stats)
}
}
}
fn print_stats_human(
&mut self,
total_duration: Duration,
stats: &Stats,
) -> io::Result<()> {
write!(
self.get_mut(),
"
{matches} matches
{lines} matched lines
{searches_with_match} files contained matches
{searches} files searched
{bytes_printed} bytes printed
{bytes_searched} bytes searched
{search_time:0.6} seconds spent searching
{process_time:0.6} seconds
",
matches = stats.matches(),
lines = stats.matched_lines(),
searches_with_match = stats.searches_with_match(),
searches = stats.searches(),
bytes_printed = stats.bytes_printed(),
bytes_searched = stats.bytes_searched(),
search_time = fractional_seconds(stats.elapsed()),
process_time = fractional_seconds(total_duration)
)
}
fn print_stats_json(
&mut self,
total_duration: Duration,
stats: &Stats,
) -> io::Result<()> {
// We specifically match the format laid out by the JSON printer in
// the grep-printer crate. We simply "extend" it with the 'summary'
// message type.
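        //
        // An illustrative example of the emitted line (values invented):
        //
        //   {"type":"summary","data":{"stats":{...},"elapsed_total":
        //    {"secs":0,"nanos":462000,"human":"0.000462s"}}}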
let fractional = fractional_seconds(total_duration);
json::to_writer(
self.get_mut(),
&json!({
"type": "summary",
"data": {
"stats": stats,
"elapsed_total": {
"secs": total_duration.as_secs(),
"nanos": total_duration.subsec_nanos(),
"human": format!("{:0.6}s", fractional),
},
}
}),
)?;
write!(self.get_mut(), "\n")
}
/// Return a mutable reference to the underlying printer's writer.
pub fn get_mut(&mut self) -> &mut W {
match *self {
Printer::Standard(ref mut p) => p.get_mut(),
Printer::Summary(ref mut p) => p.get_mut(),
Printer::JSON(ref mut p) => p.get_mut(),
}
}
}
/// A worker for executing searches.
///
/// It is intended for a single worker to execute many searches, and is
/// generally intended to be used from a single thread. When searching using
/// multiple threads, it is better to create a new worker for each thread.
#[derive(Debug)]
pub struct SearchWorker<W> {
config: Config,
command_builder: cli::CommandReaderBuilder,
decomp_builder: cli::DecompressionReaderBuilder,
matcher: PatternMatcher,
searcher: Searcher,
printer: Printer<W>,
}
impl<W: WriteColor> SearchWorker<W> {
/// Execute a search over the given subject.
pub fn search(&mut self, subject: &Subject) -> io::Result<SearchResult> {
let bin = if subject.is_explicit() {
self.config.binary_explicit.clone()
} else {
self.config.binary_implicit.clone()
};
self.searcher.set_binary_detection(bin);
let path = subject.path();
if subject.is_stdin() {
self.search_reader(path, io::stdin().lock())
} else if self.should_preprocess(path) {
self.search_preprocessor(path)
} else if self.should_decompress(path) {
self.search_decompress(path)
} else {
self.search_path(path)
}
}
/// Return a mutable reference to the underlying printer.
pub fn printer(&mut self) -> &mut Printer<W> {
&mut self.printer
}
/// Print the given statistics to the underlying writer in a way that is
/// consistent with this searcher's printer's format.
///
/// While `Stats` contains a duration itself, this only corresponds to the
/// time spent searching, whereas `total_duration` should roughly
/// approximate the lifespan of the ripgrep process itself.
pub fn print_stats(
&mut self,
total_duration: Duration,
stats: &Stats,
) -> io::Result<()> {
if self.config.json_stats {
self.printer().print_stats_json(total_duration, stats)
} else {
self.printer().print_stats(total_duration, stats)
}
}
/// Returns true if and only if the given file path should be
/// decompressed before searching.
fn should_decompress(&self, path: &Path) -> bool {
if !self.config.search_zip {
return false;
}
self.decomp_builder.get_matcher().has_command(path)
}
/// Returns true if and only if the given file path should be run through
/// the preprocessor.
fn should_preprocess(&self, path: &Path) -> bool {
        if self.config.preprocessor.is_none() {
return false;
}
if self.config.preprocessor_globs.is_empty() {
return true;
}
!self.config.preprocessor_globs.matched(path, false).is_ignore()
}
/// Search the given file path by first asking the preprocessor for the
/// data to search instead of opening the path directly.
fn search_preprocessor(
&mut self,
path: &Path,
) -> io::Result<SearchResult> {
let bin = self.config.preprocessor.as_ref().unwrap();
let mut cmd = Command::new(bin);
cmd.arg(path).stdin(Stdio::from(File::open(path)?));
let rdr = self.command_builder.build(&mut cmd).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!(
"preprocessor command could not start: '{:?}': {}",
cmd, err,
),
)
})?;
self.search_reader(path, rdr).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("preprocessor command failed: '{:?}': {}", cmd, err),
)
})
}
/// Attempt to decompress the data at the given file path and search the
/// result. If the given file path isn't recognized as a compressed file,
/// then search it without doing any decompression.
fn search_decompress(&mut self, path: &Path) -> io::Result<SearchResult> {
let rdr = self.decomp_builder.build(path)?;
self.search_reader(path, rdr)
}
/// Search the contents of the given file path.
fn search_path(&mut self, path: &Path) -> io::Result<SearchResult> {
use self::PatternMatcher::*;
let (searcher, printer) = (&mut self.searcher, &mut self.printer);
match self.matcher {
RustRegex(ref m) => search_path(m, searcher, printer, path),
#[cfg(feature = "pcre2")]
PCRE2(ref m) => search_path(m, searcher, printer, path),
}
}
/// Executes a search on the given reader, which may or may not correspond
/// directly to the contents of the given file path. Instead, the reader
/// may actually cause something else to be searched (for example, when
/// a preprocessor is set or when decompression is enabled). In those
/// cases, the file path is used for visual purposes only.
///
/// Generally speaking, this method should only be used when there is no
/// other choice. Searching via `search_path` provides more opportunities
/// for optimizations (such as memory maps).
fn search_reader<R: io::Read>(
&mut self,
path: &Path,
rdr: R,
) -> io::Result<SearchResult> {
use self::PatternMatcher::*;
let (searcher, printer) = (&mut self.searcher, &mut self.printer);
match self.matcher {
RustRegex(ref m) => search_reader(m, searcher, printer, path, rdr),
#[cfg(feature = "pcre2")]
PCRE2(ref m) => search_reader(m, searcher, printer, path, rdr),
}
}
}
/// Search the contents of the given file path using the given matcher,
/// searcher and printer.
fn search_path<M: Matcher, W: WriteColor>(
matcher: M,
searcher: &mut Searcher,
printer: &mut Printer<W>,
path: &Path,
) -> io::Result<SearchResult> {
match *printer {
Printer::Standard(ref mut p) => {
let mut sink = p.sink_with_path(&matcher, path);
searcher.search_path(&matcher, path, &mut sink)?;
Ok(SearchResult {
has_match: sink.has_match(),
stats: sink.stats().map(|s| s.clone()),
})
}
Printer::Summary(ref mut p) => {
let mut sink = p.sink_with_path(&matcher, path);
searcher.search_path(&matcher, path, &mut sink)?;
Ok(SearchResult {
has_match: sink.has_match(),
stats: sink.stats().map(|s| s.clone()),
})
}
Printer::JSON(ref mut p) => {
let mut sink = p.sink_with_path(&matcher, path);
searcher.search_path(&matcher, path, &mut sink)?;
Ok(SearchResult {
has_match: sink.has_match(),
stats: Some(sink.stats().clone()),
})
}
}
}
/// Search the contents of the given reader using the given matcher, searcher
/// and printer.
fn search_reader<M: Matcher, R: io::Read, W: WriteColor>(
matcher: M,
searcher: &mut Searcher,
printer: &mut Printer<W>,
path: &Path,
rdr: R,
) -> io::Result<SearchResult> {
match *printer {
Printer::Standard(ref mut p) => {
let mut sink = p.sink_with_path(&matcher, path);
searcher.search_reader(&matcher, rdr, &mut sink)?;
Ok(SearchResult {
has_match: sink.has_match(),
stats: sink.stats().map(|s| s.clone()),
})
}
Printer::Summary(ref mut p) => {
let mut sink = p.sink_with_path(&matcher, path);
searcher.search_reader(&matcher, rdr, &mut sink)?;
Ok(SearchResult {
has_match: sink.has_match(),
stats: sink.stats().map(|s| s.clone()),
})
}
Printer::JSON(ref mut p) => {
let mut sink = p.sink_with_path(&matcher, path);
searcher.search_reader(&matcher, rdr, &mut sink)?;
Ok(SearchResult {
has_match: sink.has_match(),
stats: Some(sink.stats().clone()),
})
}
}
}
/// Return the given duration as fractional seconds.
fn fractional_seconds(duration: Duration) -> f64 {
(duration.as_secs() as f64) + (duration.subsec_nanos() as f64 * 1e-9)
}
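
A quick sanity check of the conversion above, written as a unit-test sketch (not part of the original file):

```rust
#[test]
fn fractional_seconds_combines_secs_and_nanos() {
    use std::time::Duration;
    // 2s + 500_000_000ns == 2.5 fractional seconds.
    assert_eq!(fractional_seconds(Duration::new(2, 500_000_000)), 2.5);
}
```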

162
crates/core/subject.rs Normal file

@@ -0,0 +1,162 @@
use std::path::Path;
use ignore::{self, DirEntry};
use log;
/// A configuration for describing how subjects should be built.
#[derive(Clone, Debug)]
struct Config {
strip_dot_prefix: bool,
}
impl Default for Config {
fn default() -> Config {
Config { strip_dot_prefix: false }
}
}
/// A builder for constructing things to search over.
#[derive(Clone, Debug)]
pub struct SubjectBuilder {
config: Config,
}
impl SubjectBuilder {
/// Return a new subject builder with a default configuration.
pub fn new() -> SubjectBuilder {
SubjectBuilder { config: Config::default() }
}
/// Create a new subject from a possibly missing directory entry.
///
/// If the directory entry isn't present, then the corresponding error is
/// logged if messages have been configured. Otherwise, if the subject is
/// deemed searchable, then it is returned.
pub fn build_from_result(
&self,
result: Result<DirEntry, ignore::Error>,
) -> Option<Subject> {
match result {
Ok(dent) => self.build(dent),
Err(err) => {
err_message!("{}", err);
None
}
}
}
/// Create a new subject using this builder's configuration.
///
/// If a subject could not be created or should otherwise not be searched,
/// then this returns `None` after emitting any relevant log messages.
pub fn build(&self, dent: DirEntry) -> Option<Subject> {
let subj = Subject {
dent: dent,
strip_dot_prefix: self.config.strip_dot_prefix,
};
if let Some(ignore_err) = subj.dent.error() {
ignore_message!("{}", ignore_err);
}
// If this entry was explicitly provided by an end user, then we always
// want to search it.
if subj.is_explicit() {
return Some(subj);
}
// At this point, we only want to search something if it's explicitly a
// file. This omits symlinks. (If ripgrep was configured to follow
// symlinks, then they have already been followed by the directory
// traversal.)
if subj.is_file() {
return Some(subj);
}
        // We got nothing. Emit a debug message, but only if this isn't a
// directory. Otherwise, emitting messages for directories is just
// noisy.
if !subj.is_dir() {
log::debug!(
"ignoring {}: failed to pass subject filter: \
file type: {:?}, metadata: {:?}",
subj.dent.path().display(),
subj.dent.file_type(),
subj.dent.metadata()
);
}
None
}
/// When enabled, if the subject's file path starts with `./` then it is
/// stripped.
///
/// This is useful when implicitly searching the current working directory.
pub fn strip_dot_prefix(&mut self, yes: bool) -> &mut SubjectBuilder {
self.config.strip_dot_prefix = yes;
self
}
}
/// A subject is a thing we want to search. Generally, a subject is either a
/// file or stdin.
#[derive(Clone, Debug)]
pub struct Subject {
dent: DirEntry,
strip_dot_prefix: bool,
}
impl Subject {
/// Return the file path corresponding to this subject.
///
/// If this subject corresponds to stdin, then a special `<stdin>` path
/// is returned instead.
pub fn path(&self) -> &Path {
if self.strip_dot_prefix && self.dent.path().starts_with("./") {
self.dent.path().strip_prefix("./").unwrap()
} else {
self.dent.path()
}
}
/// Returns true if and only if this entry corresponds to stdin.
pub fn is_stdin(&self) -> bool {
self.dent.is_stdin()
}
/// Returns true if and only if this entry corresponds to a subject to
/// search that was explicitly supplied by an end user.
///
/// Generally, this corresponds to either stdin or an explicit file path
/// argument. e.g., in `rg foo some-file ./some-dir/`, `some-file` is
/// an explicit subject, but, e.g., `./some-dir/some-other-file` is not.
///
/// However, note that ripgrep does not see through shell globbing. e.g.,
/// in `rg foo ./some-dir/*`, `./some-dir/some-other-file` will be treated
/// as an explicit subject.
pub fn is_explicit(&self) -> bool {
// stdin is obvious. When an entry has a depth of 0, that means it
// was explicitly provided to our directory iterator, which means it
// was in turn explicitly provided by the end user. The !is_dir check
        // means that we want to search files even if they're symlinks, again,
// because they were explicitly provided. (And we never want to try
// to search a directory.)
self.is_stdin() || (self.dent.depth() == 0 && !self.is_dir())
}
/// Returns true if and only if this subject points to a directory after
/// following symbolic links.
fn is_dir(&self) -> bool {
let ft = match self.dent.file_type() {
None => return false,
Some(ft) => ft,
};
if ft.is_dir() {
return true;
}
// If this is a symlink, then we want to follow it to determine
// whether it's a directory or not.
self.dent.path_is_symlink() && self.dent.path().is_dir()
}
/// Returns true if and only if this subject points to a file.
fn is_file(&self) -> bool {
self.dent.file_type().map_or(false, |ft| ft.is_file())
}
}

3
crates/globset/COPYING Normal file

@@ -0,0 +1,3 @@
This project is dual-licensed under the Unlicense and MIT licenses.
You may use this code under the terms of either license.

33
crates/globset/Cargo.toml Normal file

@@ -0,0 +1,33 @@
[package]
name = "globset"
version = "0.4.4" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
Cross platform single glob and glob set matching. Glob set matching is the
process of matching one or more glob patterns against a single candidate path
simultaneously, and returning all of the globs that matched.
"""
documentation = "https://docs.rs/globset"
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/globset"
repository = "https://github.com/BurntSushi/ripgrep/tree/master/globset"
readme = "README.md"
keywords = ["regex", "glob", "multiple", "set", "pattern"]
license = "Unlicense/MIT"
[lib]
name = "globset"
bench = false
[dependencies]
aho-corasick = "0.7.3"
bstr = { version = "0.2.0", default-features = false, features = ["std"] }
fnv = "1.0.6"
log = "0.4.5"
regex = "1.1.5"
[dev-dependencies]
glob = "0.3.0"
lazy_static = "1"
[features]
simd-accel = []

21
crates/globset/LICENSE-MIT Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Andrew Gallant
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

122
crates/globset/README.md Normal file

@@ -0,0 +1,122 @@
globset
=======
Cross platform single glob and glob set matching. Glob set matching is the
process of matching one or more glob patterns against a single candidate path
simultaneously, and returning all of the globs that matched.
[![Linux build status](https://api.travis-ci.org/BurntSushi/ripgrep.svg)](https://travis-ci.org/BurntSushi/ripgrep)
[![Windows build status](https://ci.appveyor.com/api/projects/status/github/BurntSushi/ripgrep?svg=true)](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[![](https://img.shields.io/crates/v/globset.svg)](https://crates.io/crates/globset)
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
### Documentation
[https://docs.rs/globset](https://docs.rs/globset)
### Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
globset = "0.4"
```
and this to your crate root:
```rust
extern crate globset;
```
### Example: one glob
This example shows how to match a single glob against a single file path.
```rust
use globset::Glob;
let glob = Glob::new("*.rs")?.compile_matcher();
assert!(glob.is_match("foo.rs"));
assert!(glob.is_match("foo/bar.rs"));
assert!(!glob.is_match("Cargo.toml"));
```
### Example: configuring a glob matcher
This example shows how to use a `GlobBuilder` to configure aspects of match
semantics. In this example, we prevent wildcards from matching path separators.
```rust
use globset::GlobBuilder;
let glob = GlobBuilder::new("*.rs")
.literal_separator(true).build()?.compile_matcher();
assert!(glob.is_match("foo.rs"));
assert!(!glob.is_match("foo/bar.rs")); // no longer matches
assert!(!glob.is_match("Cargo.toml"));
```
### Example: match multiple globs at once
This example shows how to match multiple glob patterns at once.
```rust
use globset::{Glob, GlobSetBuilder};
let mut builder = GlobSetBuilder::new();
// A GlobBuilder can be used to configure each glob's match semantics
// independently.
builder.add(Glob::new("*.rs")?);
builder.add(Glob::new("src/lib.rs")?);
builder.add(Glob::new("src/**/foo.rs")?);
let set = builder.build()?;
assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]);
```
### Performance
This crate implements globs by converting them to regular expressions, and
executing them with the
[`regex`](https://github.com/rust-lang-nursery/regex)
crate.
For single glob matching, performance of this crate should be roughly on par
with the performance of the
[`glob`](https://github.com/rust-lang-nursery/glob)
crate. (`*_regex` correspond to benchmarks for this library while `*_glob`
correspond to benchmarks for the `glob` library.)
Optimizations in the `regex` crate may propel this library past `glob`,
particularly when matching longer paths.
```
test ext_glob ... bench: 425 ns/iter (+/- 21)
test ext_regex ... bench: 175 ns/iter (+/- 10)
test long_glob ... bench: 182 ns/iter (+/- 11)
test long_regex ... bench: 173 ns/iter (+/- 10)
test short_glob ... bench: 69 ns/iter (+/- 4)
test short_regex ... bench: 83 ns/iter (+/- 2)
```
The primary performance advantage of this crate is when matching multiple
globs against a single path. With the `glob` crate, one must match each glob
synchronously, one after the other. In this crate, many can be matched
simultaneously. For example:
```
test many_short_glob ... bench: 1,063 ns/iter (+/- 47)
test many_short_regex_set ... bench: 186 ns/iter (+/- 11)
```
### Comparison with the [`glob`](https://github.com/rust-lang-nursery/glob) crate
* Supports alternate "or" globs, e.g., `*.{foo,bar}`.
* Can match non-UTF-8 file paths correctly.
* Supports matching multiple globs at once.
* Doesn't provide a recursive directory iterator of matching file paths,
although I believe this crate should grow one eventually.
* Supports case insensitive and require-literal-separator match options, but
**doesn't** support the require-literal-leading-dot option.

24
crates/globset/UNLICENSE Normal file

@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>

116
crates/globset/benches/bench.rs Normal file

@@ -0,0 +1,116 @@
/*!
This module benchmarks the glob implementation. For benchmarks on the ripgrep
tool itself, see the benchsuite directory.
*/
#![feature(test)]
extern crate glob;
extern crate globset;
extern crate regex;
extern crate test;
use globset::{Candidate, Glob, GlobMatcher, GlobSet, GlobSetBuilder};
const EXT: &'static str = "some/a/bigger/path/to/the/crazy/needle.txt";
const EXT_PAT: &'static str = "*.txt";
const SHORT: &'static str = "some/needle.txt";
const SHORT_PAT: &'static str = "some/**/needle.txt";
const LONG: &'static str = "some/a/bigger/path/to/the/crazy/needle.txt";
const LONG_PAT: &'static str = "some/**/needle.txt";
fn new_glob(pat: &str) -> glob::Pattern {
glob::Pattern::new(pat).unwrap()
}
fn new_reglob(pat: &str) -> GlobMatcher {
Glob::new(pat).unwrap().compile_matcher()
}
fn new_reglob_many(pats: &[&str]) -> GlobSet {
let mut builder = GlobSetBuilder::new();
for pat in pats {
builder.add(Glob::new(pat).unwrap());
}
builder.build().unwrap()
}
#[bench]
fn ext_glob(b: &mut test::Bencher) {
let pat = new_glob(EXT_PAT);
b.iter(|| assert!(pat.matches(EXT)));
}
#[bench]
fn ext_regex(b: &mut test::Bencher) {
let set = new_reglob(EXT_PAT);
let cand = Candidate::new(EXT);
b.iter(|| assert!(set.is_match_candidate(&cand)));
}
#[bench]
fn short_glob(b: &mut test::Bencher) {
let pat = new_glob(SHORT_PAT);
b.iter(|| assert!(pat.matches(SHORT)));
}
#[bench]
fn short_regex(b: &mut test::Bencher) {
let set = new_reglob(SHORT_PAT);
let cand = Candidate::new(SHORT);
b.iter(|| assert!(set.is_match_candidate(&cand)));
}
#[bench]
fn long_glob(b: &mut test::Bencher) {
let pat = new_glob(LONG_PAT);
b.iter(|| assert!(pat.matches(LONG)));
}
#[bench]
fn long_regex(b: &mut test::Bencher) {
let set = new_reglob(LONG_PAT);
let cand = Candidate::new(LONG);
b.iter(|| assert!(set.is_match_candidate(&cand)));
}
const MANY_SHORT_GLOBS: &'static [&'static str] = &[
// Taken from a random .gitignore on my system.
".*.swp",
"tags",
"target",
"*.lock",
"tmp",
"*.csv",
"*.fst",
"*-got",
"*.csv.idx",
"words",
"98m*",
"dict",
"test",
"months",
];
const MANY_SHORT_SEARCH: &'static str = "98m-blah.csv.idx";
#[bench]
fn many_short_glob(b: &mut test::Bencher) {
let pats: Vec<_> = MANY_SHORT_GLOBS.iter().map(|&s| new_glob(s)).collect();
b.iter(|| {
let mut count = 0;
for pat in &pats {
if pat.matches(MANY_SHORT_SEARCH) {
count += 1;
}
}
assert_eq!(2, count);
})
}
#[bench]
fn many_short_regex_set(b: &mut test::Bencher) {
let set = new_reglob_many(MANY_SHORT_GLOBS);
b.iter(|| assert_eq!(2, set.matches(MANY_SHORT_SEARCH).iter().count()));
}

1521
crates/globset/src/glob.rs Normal file

File diff suppressed because it is too large

860
crates/globset/src/lib.rs Normal file

@@ -0,0 +1,860 @@
/*!
The globset crate provides cross platform single glob and glob set matching.
Glob set matching is the process of matching one or more glob patterns against
a single candidate path simultaneously, and returning all of the globs that
matched. For example, given this set of globs:
```ignore
*.rs
src/lib.rs
src/**/foo.rs
```
and a path `src/bar/baz/foo.rs`, then the set would report the first and third
globs as matching.
# Example: one glob
This example shows how to match a single glob against a single file path.
```
# fn example() -> Result<(), globset::Error> {
use globset::Glob;
let glob = Glob::new("*.rs")?.compile_matcher();
assert!(glob.is_match("foo.rs"));
assert!(glob.is_match("foo/bar.rs"));
assert!(!glob.is_match("Cargo.toml"));
# Ok(()) } example().unwrap();
```
# Example: configuring a glob matcher
This example shows how to use a `GlobBuilder` to configure aspects of match
semantics. In this example, we prevent wildcards from matching path separators.
```
# fn example() -> Result<(), globset::Error> {
use globset::GlobBuilder;
let glob = GlobBuilder::new("*.rs")
.literal_separator(true).build()?.compile_matcher();
assert!(glob.is_match("foo.rs"));
assert!(!glob.is_match("foo/bar.rs")); // no longer matches
assert!(!glob.is_match("Cargo.toml"));
# Ok(()) } example().unwrap();
```
# Example: match multiple globs at once
This example shows how to match multiple glob patterns at once.
```
# fn example() -> Result<(), globset::Error> {
use globset::{Glob, GlobSetBuilder};
let mut builder = GlobSetBuilder::new();
// A GlobBuilder can be used to configure each glob's match semantics
// independently.
builder.add(Glob::new("*.rs")?);
builder.add(Glob::new("src/lib.rs")?);
builder.add(Glob::new("src/**/foo.rs")?);
let set = builder.build()?;
assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]);
# Ok(()) } example().unwrap();
```
# Syntax
Standard Unix-style glob syntax is supported:
* `?` matches any single character. (If the `literal_separator` option is
enabled, then `?` can never match a path separator.)
* `*` matches zero or more characters. (If the `literal_separator` option is
enabled, then `*` can never match a path separator.)
* `**` recursively matches directories but is only legal in three situations.
First, if the glob starts with <code>\*\*&#x2F;</code>, then it matches
all directories. For example, <code>\*\*&#x2F;foo</code> matches `foo`
and `bar/foo` but not `foo/bar`. Secondly, if the glob ends with
<code>&#x2F;\*\*</code>, then it matches all sub-entries. For example,
<code>foo&#x2F;\*\*</code> matches `foo/a` and `foo/a/b`, but not `foo`.
Thirdly, if the glob contains <code>&#x2F;\*\*&#x2F;</code> anywhere within
the pattern, then it matches zero or more directories. Using `**` anywhere
else is illegal (N.B. the glob `**` is allowed and means "match everything").
* `{a,b}` matches `a` or `b` where `a` and `b` are arbitrary glob patterns.
(N.B. Nesting `{...}` is not currently allowed.)
* `[ab]` matches `a` or `b` where `a` and `b` are characters. Use
`[!ab]` to match any character except for `a` and `b`.
* Metacharacters such as `*` and `?` can be escaped with character class
notation. e.g., `[*]` matches `*`.
* When backslash escapes are enabled, a backslash (`\`) will escape all meta
characters in a glob. If it precedes a non-meta character, then the slash is
ignored. A `\\` will match a literal `\\`. Note that this mode is only
enabled on Unix platforms by default, but can be enabled on any platform
via the `backslash_escape` setting on `Glob`.
A `GlobBuilder` can be used to prevent wildcards from matching path separators,
or to enable case insensitive matching.
*/
#![deny(missing_docs)]
extern crate aho_corasick;
extern crate bstr;
extern crate fnv;
#[macro_use]
extern crate log;
extern crate regex;
use std::borrow::Cow;
use std::collections::{BTreeMap, HashMap};
use std::error::Error as StdError;
use std::fmt;
use std::hash;
use std::path::Path;
use std::str;
use aho_corasick::AhoCorasick;
use bstr::{ByteSlice, ByteVec, B};
use regex::bytes::{Regex, RegexBuilder, RegexSet};
use glob::MatchStrategy;
pub use glob::{Glob, GlobBuilder, GlobMatcher};
use pathutil::{file_name, file_name_ext, normalize_path};
mod glob;
mod pathutil;
/// Represents an error that can occur when parsing a glob pattern.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Error {
/// The original glob provided by the caller.
glob: Option<String>,
/// The kind of error.
kind: ErrorKind,
}
/// The kind of error that can occur when parsing a glob pattern.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum ErrorKind {
/// **DEPRECATED**.
///
/// This error used to occur for consistency with git's glob specification,
/// but the specification now accepts all uses of `**`. When `**` does not
/// appear adjacent to a path separator or at the beginning/end of a glob,
/// it is now treated as two consecutive `*` patterns. As such, this error
/// is no longer used.
InvalidRecursive,
/// Occurs when a character class (e.g., `[abc]`) is not closed.
UnclosedClass,
/// Occurs when a range in a character (e.g., `[a-z]`) is invalid. For
/// example, if the range starts with a lexicographically larger character
/// than it ends with.
InvalidRange(char, char),
/// Occurs when a `}` is found without a matching `{`.
UnopenedAlternates,
/// Occurs when a `{` is found without a matching `}`.
UnclosedAlternates,
/// Occurs when an alternating group is nested inside another alternating
/// group, e.g., `{{a,b},{c,d}}`.
NestedAlternates,
/// Occurs when an unescaped '\' is found at the end of a glob.
DanglingEscape,
/// An error associated with parsing or compiling a regex.
Regex(String),
/// Hints that destructuring should not be exhaustive.
///
/// This enum may grow additional variants, so this makes sure clients
/// don't count on exhaustive matching. (Otherwise, adding a new variant
/// could break existing code.)
#[doc(hidden)]
__Nonexhaustive,
}
impl StdError for Error {
fn description(&self) -> &str {
self.kind.description()
}
}
impl Error {
/// Return the glob that caused this error, if one exists.
pub fn glob(&self) -> Option<&str> {
self.glob.as_ref().map(|s| &**s)
}
/// Return the kind of this error.
pub fn kind(&self) -> &ErrorKind {
&self.kind
}
}
impl ErrorKind {
fn description(&self) -> &str {
match *self {
ErrorKind::InvalidRecursive => {
"invalid use of **; must be one path component"
}
ErrorKind::UnclosedClass => {
"unclosed character class; missing ']'"
}
ErrorKind::InvalidRange(_, _) => "invalid character range",
ErrorKind::UnopenedAlternates => {
"unopened alternate group; missing '{' \
(maybe escape '}' with '[}]'?)"
}
ErrorKind::UnclosedAlternates => {
"unclosed alternate group; missing '}' \
(maybe escape '{' with '[{]'?)"
}
ErrorKind::NestedAlternates => {
"nested alternate groups are not allowed"
}
ErrorKind::DanglingEscape => "dangling '\\'",
ErrorKind::Regex(ref err) => err,
ErrorKind::__Nonexhaustive => unreachable!(),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.glob {
None => self.kind.fmt(f),
Some(ref glob) => {
write!(f, "error parsing glob '{}': {}", glob, self.kind)
}
}
}
}
impl fmt::Display for ErrorKind {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ErrorKind::InvalidRecursive
| ErrorKind::UnclosedClass
| ErrorKind::UnopenedAlternates
| ErrorKind::UnclosedAlternates
| ErrorKind::NestedAlternates
| ErrorKind::DanglingEscape
| ErrorKind::Regex(_) => write!(f, "{}", self.description()),
ErrorKind::InvalidRange(s, e) => {
write!(f, "invalid range; '{}' > '{}'", s, e)
}
ErrorKind::__Nonexhaustive => unreachable!(),
}
}
}
fn new_regex(pat: &str) -> Result<Regex, Error> {
RegexBuilder::new(pat)
.dot_matches_new_line(true)
.size_limit(10 * (1 << 20))
.dfa_size_limit(10 * (1 << 20))
.build()
.map_err(|err| Error {
glob: Some(pat.to_string()),
kind: ErrorKind::Regex(err.to_string()),
})
}
fn new_regex_set<I, S>(pats: I) -> Result<RegexSet, Error>
where
S: AsRef<str>,
I: IntoIterator<Item = S>,
{
RegexSet::new(pats).map_err(|err| Error {
glob: None,
kind: ErrorKind::Regex(err.to_string()),
})
}
type Fnv = hash::BuildHasherDefault<fnv::FnvHasher>;
/// GlobSet represents a group of globs that can be matched together in a
/// single pass.
#[derive(Clone, Debug)]
pub struct GlobSet {
len: usize,
strats: Vec<GlobSetMatchStrategy>,
}
impl GlobSet {
/// Create an empty `GlobSet`. An empty set matches nothing.
#[inline]
pub fn empty() -> GlobSet {
GlobSet { len: 0, strats: vec![] }
}
/// Returns true if this set is empty, and therefore matches nothing.
#[inline]
pub fn is_empty(&self) -> bool {
self.len == 0
}
/// Returns the number of globs in this set.
#[inline]
pub fn len(&self) -> usize {
self.len
}
/// Returns true if any glob in this set matches the path given.
pub fn is_match<P: AsRef<Path>>(&self, path: P) -> bool {
self.is_match_candidate(&Candidate::new(path.as_ref()))
}
/// Returns true if any glob in this set matches the path given.
///
/// This takes a Candidate as input, which can be used to amortize the
/// cost of preparing a path for matching.
pub fn is_match_candidate(&self, path: &Candidate) -> bool {
if self.is_empty() {
return false;
}
for strat in &self.strats {
if strat.is_match(path) {
return true;
}
}
false
}
/// Returns the sequence number of every glob pattern that matches the
/// given path.
pub fn matches<P: AsRef<Path>>(&self, path: P) -> Vec<usize> {
self.matches_candidate(&Candidate::new(path.as_ref()))
}
/// Returns the sequence number of every glob pattern that matches the
/// given path.
///
/// This takes a Candidate as input, which can be used to amortize the
/// cost of preparing a path for matching.
pub fn matches_candidate(&self, path: &Candidate) -> Vec<usize> {
let mut into = vec![];
if self.is_empty() {
return into;
}
self.matches_candidate_into(path, &mut into);
into
}
/// Adds the sequence number of every glob pattern that matches the given
/// path to the vec given.
///
    /// `into` is cleared before matching begins, and contains the set of
/// sequence numbers (in ascending order) after matching ends. If no globs
/// were matched, then `into` will be empty.
pub fn matches_into<P: AsRef<Path>>(
&self,
path: P,
into: &mut Vec<usize>,
) {
self.matches_candidate_into(&Candidate::new(path.as_ref()), into);
}
/// Adds the sequence number of every glob pattern that matches the given
/// path to the vec given.
///
    /// `into` is cleared before matching begins, and contains the set of
/// sequence numbers (in ascending order) after matching ends. If no globs
/// were matched, then `into` will be empty.
///
/// This takes a Candidate as input, which can be used to amortize the
/// cost of preparing a path for matching.
pub fn matches_candidate_into(
&self,
path: &Candidate,
into: &mut Vec<usize>,
) {
into.clear();
if self.is_empty() {
return;
}
for strat in &self.strats {
strat.matches_into(path, into);
}
into.sort();
into.dedup();
}
fn new(pats: &[Glob]) -> Result<GlobSet, Error> {
if pats.is_empty() {
return Ok(GlobSet { len: 0, strats: vec![] });
}
let mut lits = LiteralStrategy::new();
let mut base_lits = BasenameLiteralStrategy::new();
let mut exts = ExtensionStrategy::new();
let mut prefixes = MultiStrategyBuilder::new();
let mut suffixes = MultiStrategyBuilder::new();
let mut required_exts = RequiredExtensionStrategyBuilder::new();
let mut regexes = MultiStrategyBuilder::new();
for (i, p) in pats.iter().enumerate() {
match MatchStrategy::new(p) {
MatchStrategy::Literal(lit) => {
lits.add(i, lit);
}
MatchStrategy::BasenameLiteral(lit) => {
base_lits.add(i, lit);
}
MatchStrategy::Extension(ext) => {
exts.add(i, ext);
}
MatchStrategy::Prefix(prefix) => {
prefixes.add(i, prefix);
}
MatchStrategy::Suffix { suffix, component } => {
if component {
lits.add(i, suffix[1..].to_string());
}
suffixes.add(i, suffix);
}
MatchStrategy::RequiredExtension(ext) => {
required_exts.add(i, ext, p.regex().to_owned());
}
MatchStrategy::Regex => {
debug!("glob converted to regex: {:?}", p);
regexes.add(i, p.regex().to_owned());
}
}
}
debug!(
"built glob set; {} literals, {} basenames, {} extensions, \
{} prefixes, {} suffixes, {} required extensions, {} regexes",
lits.0.len(),
base_lits.0.len(),
exts.0.len(),
prefixes.literals.len(),
suffixes.literals.len(),
required_exts.0.len(),
regexes.literals.len()
);
Ok(GlobSet {
len: pats.len(),
strats: vec![
GlobSetMatchStrategy::Extension(exts),
GlobSetMatchStrategy::BasenameLiteral(base_lits),
GlobSetMatchStrategy::Literal(lits),
GlobSetMatchStrategy::Suffix(suffixes.suffix()),
GlobSetMatchStrategy::Prefix(prefixes.prefix()),
GlobSetMatchStrategy::RequiredExtension(
required_exts.build()?,
),
GlobSetMatchStrategy::Regex(regexes.regex_set()?),
],
})
}
}
/// GlobSetBuilder builds a group of patterns that can be used to
/// simultaneously match a file path.
#[derive(Clone, Debug)]
pub struct GlobSetBuilder {
pats: Vec<Glob>,
}
impl GlobSetBuilder {
/// Create a new GlobSetBuilder. A GlobSetBuilder can be used to add new
/// patterns. Once all patterns have been added, `build` should be called
/// to produce a `GlobSet`, which can then be used for matching.
pub fn new() -> GlobSetBuilder {
GlobSetBuilder { pats: vec![] }
}
/// Builds a new matcher from all of the glob patterns added so far.
///
/// Once a matcher is built, no new patterns can be added to it.
pub fn build(&self) -> Result<GlobSet, Error> {
GlobSet::new(&self.pats)
}
/// Add a new pattern to this set.
pub fn add(&mut self, pat: Glob) -> &mut GlobSetBuilder {
self.pats.push(pat);
self
}
}
/// A candidate path for matching.
///
/// All glob matching in this crate operates on `Candidate` values.
/// Constructing candidates has a very small cost associated with it, so
/// callers may find it beneficial to amortize that cost when matching a single
/// path against multiple globs or sets of globs.
#[derive(Clone, Debug)]
pub struct Candidate<'a> {
path: Cow<'a, [u8]>,
basename: Cow<'a, [u8]>,
ext: Cow<'a, [u8]>,
}
impl<'a> Candidate<'a> {
/// Create a new candidate for matching from the given path.
pub fn new<P: AsRef<Path> + ?Sized>(path: &'a P) -> Candidate<'a> {
let path = normalize_path(Vec::from_path_lossy(path.as_ref()));
let basename = file_name(&path).unwrap_or(Cow::Borrowed(B("")));
let ext = file_name_ext(&basename).unwrap_or(Cow::Borrowed(B("")));
Candidate { path: path, basename: basename, ext: ext }
}
fn path_prefix(&self, max: usize) -> &[u8] {
if self.path.len() <= max {
&*self.path
} else {
&self.path[..max]
}
}
fn path_suffix(&self, max: usize) -> &[u8] {
if self.path.len() <= max {
&*self.path
} else {
&self.path[self.path.len() - max..]
}
}
}
#[derive(Clone, Debug)]
enum GlobSetMatchStrategy {
Literal(LiteralStrategy),
BasenameLiteral(BasenameLiteralStrategy),
Extension(ExtensionStrategy),
Prefix(PrefixStrategy),
Suffix(SuffixStrategy),
RequiredExtension(RequiredExtensionStrategy),
Regex(RegexSetStrategy),
}
impl GlobSetMatchStrategy {
fn is_match(&self, candidate: &Candidate) -> bool {
use self::GlobSetMatchStrategy::*;
match *self {
Literal(ref s) => s.is_match(candidate),
BasenameLiteral(ref s) => s.is_match(candidate),
Extension(ref s) => s.is_match(candidate),
Prefix(ref s) => s.is_match(candidate),
Suffix(ref s) => s.is_match(candidate),
RequiredExtension(ref s) => s.is_match(candidate),
Regex(ref s) => s.is_match(candidate),
}
}
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
use self::GlobSetMatchStrategy::*;
match *self {
Literal(ref s) => s.matches_into(candidate, matches),
BasenameLiteral(ref s) => s.matches_into(candidate, matches),
Extension(ref s) => s.matches_into(candidate, matches),
Prefix(ref s) => s.matches_into(candidate, matches),
Suffix(ref s) => s.matches_into(candidate, matches),
RequiredExtension(ref s) => s.matches_into(candidate, matches),
Regex(ref s) => s.matches_into(candidate, matches),
}
}
}
#[derive(Clone, Debug)]
struct LiteralStrategy(BTreeMap<Vec<u8>, Vec<usize>>);
impl LiteralStrategy {
fn new() -> LiteralStrategy {
LiteralStrategy(BTreeMap::new())
}
fn add(&mut self, global_index: usize, lit: String) {
self.0.entry(lit.into_bytes()).or_insert(vec![]).push(global_index);
}
fn is_match(&self, candidate: &Candidate) -> bool {
self.0.contains_key(candidate.path.as_bytes())
}
#[inline(never)]
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
if let Some(hits) = self.0.get(candidate.path.as_bytes()) {
matches.extend(hits);
}
}
}
#[derive(Clone, Debug)]
struct BasenameLiteralStrategy(BTreeMap<Vec<u8>, Vec<usize>>);
impl BasenameLiteralStrategy {
fn new() -> BasenameLiteralStrategy {
BasenameLiteralStrategy(BTreeMap::new())
}
fn add(&mut self, global_index: usize, lit: String) {
self.0.entry(lit.into_bytes()).or_insert(vec![]).push(global_index);
}
fn is_match(&self, candidate: &Candidate) -> bool {
if candidate.basename.is_empty() {
return false;
}
self.0.contains_key(candidate.basename.as_bytes())
}
#[inline(never)]
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
if candidate.basename.is_empty() {
return;
}
if let Some(hits) = self.0.get(candidate.basename.as_bytes()) {
matches.extend(hits);
}
}
}
#[derive(Clone, Debug)]
struct ExtensionStrategy(HashMap<Vec<u8>, Vec<usize>, Fnv>);
impl ExtensionStrategy {
fn new() -> ExtensionStrategy {
ExtensionStrategy(HashMap::with_hasher(Fnv::default()))
}
fn add(&mut self, global_index: usize, ext: String) {
self.0.entry(ext.into_bytes()).or_insert(vec![]).push(global_index);
}
fn is_match(&self, candidate: &Candidate) -> bool {
if candidate.ext.is_empty() {
return false;
}
self.0.contains_key(candidate.ext.as_bytes())
}
#[inline(never)]
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
if candidate.ext.is_empty() {
return;
}
if let Some(hits) = self.0.get(candidate.ext.as_bytes()) {
matches.extend(hits);
}
}
}
#[derive(Clone, Debug)]
struct PrefixStrategy {
matcher: AhoCorasick,
map: Vec<usize>,
longest: usize,
}
impl PrefixStrategy {
fn is_match(&self, candidate: &Candidate) -> bool {
let path = candidate.path_prefix(self.longest);
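        // Only the leading `longest` bytes can contain a prefix hit, so the
        // Aho-Corasick scan is bounded by the longest literal; a match
        // starting at offset 0 is a literal prefix of the path.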
for m in self.matcher.find_overlapping_iter(path) {
if m.start() == 0 {
return true;
}
}
false
}
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
let path = candidate.path_prefix(self.longest);
for m in self.matcher.find_overlapping_iter(path) {
if m.start() == 0 {
matches.push(self.map[m.pattern()]);
}
}
}
}
#[derive(Clone, Debug)]
struct SuffixStrategy {
matcher: AhoCorasick,
map: Vec<usize>,
longest: usize,
}
impl SuffixStrategy {
fn is_match(&self, candidate: &Candidate) -> bool {
let path = candidate.path_suffix(self.longest);
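        // Mirror of the prefix case: scan only the trailing window; a match
        // ending at the end of that window is a literal suffix of the path.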
for m in self.matcher.find_overlapping_iter(path) {
if m.end() == path.len() {
return true;
}
}
false
}
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
let path = candidate.path_suffix(self.longest);
for m in self.matcher.find_overlapping_iter(path) {
if m.end() == path.len() {
matches.push(self.map[m.pattern()]);
}
}
}
}
#[derive(Clone, Debug)]
struct RequiredExtensionStrategy(HashMap<Vec<u8>, Vec<(usize, Regex)>, Fnv>);
impl RequiredExtensionStrategy {
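    // The extension is a cheap pre-filter: only globs registered under the
    // candidate's extension have their full regex run against the path.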
fn is_match(&self, candidate: &Candidate) -> bool {
if candidate.ext.is_empty() {
return false;
}
match self.0.get(candidate.ext.as_bytes()) {
None => false,
Some(regexes) => {
for &(_, ref re) in regexes {
if re.is_match(candidate.path.as_bytes()) {
return true;
}
}
false
}
}
}
#[inline(never)]
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
if candidate.ext.is_empty() {
return;
}
if let Some(regexes) = self.0.get(candidate.ext.as_bytes()) {
for &(global_index, ref re) in regexes {
if re.is_match(candidate.path.as_bytes()) {
matches.push(global_index);
}
}
}
}
}
#[derive(Clone, Debug)]
struct RegexSetStrategy {
matcher: RegexSet,
map: Vec<usize>,
}
impl RegexSetStrategy {
fn is_match(&self, candidate: &Candidate) -> bool {
self.matcher.is_match(candidate.path.as_bytes())
}
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
for i in self.matcher.matches(candidate.path.as_bytes()) {
matches.push(self.map[i]);
}
}
}
#[derive(Clone, Debug)]
struct MultiStrategyBuilder {
literals: Vec<String>,
map: Vec<usize>,
longest: usize,
}
impl MultiStrategyBuilder {
fn new() -> MultiStrategyBuilder {
MultiStrategyBuilder { literals: vec![], map: vec![], longest: 0 }
}
fn add(&mut self, global_index: usize, literal: String) {
if literal.len() > self.longest {
self.longest = literal.len();
}
self.map.push(global_index);
self.literals.push(literal);
}
fn prefix(self) -> PrefixStrategy {
PrefixStrategy {
matcher: AhoCorasick::new_auto_configured(&self.literals),
map: self.map,
longest: self.longest,
}
}
fn suffix(self) -> SuffixStrategy {
SuffixStrategy {
matcher: AhoCorasick::new_auto_configured(&self.literals),
map: self.map,
longest: self.longest,
}
}
fn regex_set(self) -> Result<RegexSetStrategy, Error> {
Ok(RegexSetStrategy {
matcher: new_regex_set(self.literals)?,
map: self.map,
})
}
}
#[derive(Clone, Debug)]
struct RequiredExtensionStrategyBuilder(
HashMap<Vec<u8>, Vec<(usize, String)>>,
);
impl RequiredExtensionStrategyBuilder {
fn new() -> RequiredExtensionStrategyBuilder {
RequiredExtensionStrategyBuilder(HashMap::new())
}
fn add(&mut self, global_index: usize, ext: String, regex: String) {
self.0
.entry(ext.into_bytes())
.or_insert(vec![])
.push((global_index, regex));
}
fn build(self) -> Result<RequiredExtensionStrategy, Error> {
let mut exts = HashMap::with_hasher(Fnv::default());
for (ext, regexes) in self.0.into_iter() {
exts.insert(ext.clone(), vec![]);
for (global_index, regex) in regexes {
let compiled = new_regex(&regex)?;
exts.get_mut(&ext).unwrap().push((global_index, compiled));
}
}
Ok(RequiredExtensionStrategy(exts))
}
}
#[cfg(test)]
mod tests {
use super::GlobSetBuilder;
use glob::Glob;
#[test]
fn set_works() {
let mut builder = GlobSetBuilder::new();
builder.add(Glob::new("src/**/*.rs").unwrap());
builder.add(Glob::new("*.c").unwrap());
builder.add(Glob::new("src/lib.rs").unwrap());
let set = builder.build().unwrap();
assert!(set.is_match("foo.c"));
assert!(set.is_match("src/foo.c"));
assert!(!set.is_match("foo.rs"));
assert!(!set.is_match("tests/foo.rs"));
assert!(set.is_match("src/foo.rs"));
assert!(set.is_match("src/grep/src/main.rs"));
let matches = set.matches("src/lib.rs");
assert_eq!(2, matches.len());
assert_eq!(0, matches[0]);
assert_eq!(2, matches[1]);
}
#[test]
fn empty_set_works() {
let set = GlobSetBuilder::new().build().unwrap();
assert!(!set.is_match(""));
assert!(!set.is_match("a"));
}
}
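
The pieces above compose as follows. This is a minimal sketch (not part of this commit) using only the public items shown in this file; the globs and paths are illustrative:

```rust
use globset::{Candidate, Glob, GlobSetBuilder};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut builder = GlobSetBuilder::new();
    builder.add(Glob::new("src/**/*.rs")?);
    builder.add(Glob::new("*.toml")?);
    let set = builder.build()?;

    // Build the candidate once and reuse it, amortizing the cost of
    // preparing the path across multiple queries.
    let candidate = Candidate::new("src/lib.rs");
    assert!(set.is_match_candidate(&candidate));

    // A reusable scratch vec avoids an allocation per query.
    let mut hits = vec![];
    set.matches_candidate_into(&candidate, &mut hits);
    assert_eq!(hits, vec![0]);
    Ok(())
}
```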

129
crates/globset/src/pathutil.rs Normal file

@@ -0,0 +1,129 @@
use std::borrow::Cow;
use bstr::{ByteSlice, ByteVec};
/// The final component of the path, if it is a normal file.
///
/// If the path terminates in ., .., or consists solely of a root or prefix,
/// file_name will return None.
pub fn file_name<'a>(path: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
if path.is_empty() {
return None;
} else if path.last_byte() == Some(b'.') {
return None;
}
let last_slash = path.rfind_byte(b'/').map(|i| i + 1).unwrap_or(0);
Some(match *path {
Cow::Borrowed(path) => Cow::Borrowed(&path[last_slash..]),
Cow::Owned(ref path) => {
let mut path = path.clone();
path.drain_bytes(..last_slash);
Cow::Owned(path)
}
})
}
/// Return a file extension given a path's file name.
///
/// Note that this does NOT match the semantics of std::path::Path::extension.
/// Namely, the extension includes the `.` and matching is otherwise more
/// liberal. Specifically, the extension is:
///
/// * None, if the file name given is empty;
/// * None, if there is no embedded `.`;
/// * Otherwise, the portion of the file name starting with the final `.`.
///
/// e.g., a file name of `.rs` has an extension `.rs`.
///
/// N.B. This is done to make certain glob match optimizations easier. Namely,
/// a pattern like `*.rs` is obviously trying to match files with a `rs`
/// extension, but it also matches files like `.rs`, which doesn't have an
/// extension according to std::path::Path::extension.
pub fn file_name_ext<'a>(name: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
if name.is_empty() {
return None;
}
let last_dot_at = match name.rfind_byte(b'.') {
None => return None,
Some(i) => i,
};
Some(match *name {
Cow::Borrowed(name) => Cow::Borrowed(&name[last_dot_at..]),
Cow::Owned(ref name) => {
let mut name = name.clone();
name.drain_bytes(..last_dot_at);
Cow::Owned(name)
}
})
}
/// Normalizes a path to use `/` as a separator everywhere, even on platforms
/// that recognize other characters as separators.
#[cfg(unix)]
pub fn normalize_path(path: Cow<[u8]>) -> Cow<[u8]> {
// UNIX only uses /, so we're good.
path
}
/// Normalizes a path to use `/` as a separator everywhere, even on platforms
/// that recognize other characters as separators.
#[cfg(not(unix))]
pub fn normalize_path(mut path: Cow<[u8]>) -> Cow<[u8]> {
use std::path::is_separator;
for i in 0..path.len() {
if path[i] == b'/' || !is_separator(path[i] as char) {
continue;
}
path.to_mut()[i] = b'/';
}
path
}
#[cfg(test)]
mod tests {
use std::borrow::Cow;
use bstr::{ByteVec, B};
use super::{file_name_ext, normalize_path};
macro_rules! ext {
($name:ident, $file_name:expr, $ext:expr) => {
#[test]
fn $name() {
let bs = Vec::from($file_name);
let got = file_name_ext(&Cow::Owned(bs));
assert_eq!($ext.map(|s| Cow::Borrowed(B(s))), got);
}
};
}
ext!(ext1, "foo.rs", Some(".rs"));
ext!(ext2, ".rs", Some(".rs"));
ext!(ext3, "..rs", Some(".rs"));
ext!(ext4, "", None::<&str>);
ext!(ext5, "foo", None::<&str>);
macro_rules! normalize {
($name:ident, $path:expr, $expected:expr) => {
#[test]
fn $name() {
let bs = Vec::from_slice($path);
let got = normalize_path(Cow::Owned(bs));
assert_eq!($expected.to_vec(), got.into_owned());
}
};
}
normalize!(normal1, b"foo", b"foo");
normalize!(normal2, b"foo/bar", b"foo/bar");
#[cfg(unix)]
normalize!(normal3, b"foo\\bar", b"foo\\bar");
#[cfg(not(unix))]
normalize!(normal3, b"foo\\bar", b"foo/bar");
#[cfg(unix)]
normalize!(normal4, b"foo\\bar/baz", b"foo\\bar/baz");
#[cfg(not(unix))]
normalize!(normal4, b"foo\\bar/baz", b"foo/bar/baz");
}
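
A further test along these lines, added inside the tests module above, could pin down the deliberate divergence from `std::path::Path::extension` (a sketch, not part of the original suite):

```rust
#[test]
fn ext_vs_std() {
    use std::path::Path;

    // std says a dotfile like `.rs` has no extension...
    assert_eq!(Path::new(".rs").extension(), None);
    // ...while this module reports `.rs`, so globs like `*.rs` can still
    // be served by the extension fast path.
    let name: Cow<[u8]> = Cow::Borrowed(b".rs");
    assert_eq!(file_name_ext(&name), Some(Cow::Borrowed(&b".rs"[..])));
}
```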

3
crates/grep/COPYING Normal file

@@ -0,0 +1,3 @@
This project is dual-licensed under the Unlicense and MIT licenses.
You may use this code under the terms of either license.

32
crates/grep/Cargo.toml Normal file

@@ -0,0 +1,32 @@
[package]
name = "grep"
version = "0.2.4" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
Fast line oriented regex searching as a library.
"""
documentation = "http://burntsushi.net/rustdoc/grep/"
homepage = "https://github.com/BurntSushi/ripgrep"
repository = "https://github.com/BurntSushi/ripgrep"
readme = "README.md"
keywords = ["regex", "grep", "egrep", "search", "pattern"]
license = "Unlicense/MIT"
[dependencies]
grep-cli = { version = "0.1.2", path = "../grep-cli" }
grep-matcher = { version = "0.1.2", path = "../grep-matcher" }
grep-pcre2 = { version = "0.1.3", path = "../grep-pcre2", optional = true }
grep-printer = { version = "0.1.2", path = "../grep-printer" }
grep-regex = { version = "0.1.3", path = "../grep-regex" }
grep-searcher = { version = "0.1.4", path = "../grep-searcher" }
[dev-dependencies]
termcolor = "1.0.4"
walkdir = "2.2.7"
[features]
simd-accel = ["grep-searcher/simd-accel"]
pcre2 = ["grep-pcre2"]
# This feature is DEPRECATED. Runtime dispatch is used for SIMD now.
avx-accel = []

21
crates/grep/LICENSE-MIT Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Andrew Gallant
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

41
crates/grep/README.md Normal file

@@ -0,0 +1,41 @@
grep
----
ripgrep, as a library.
[![Linux build status](https://api.travis-ci.org/BurntSushi/ripgrep.svg)](https://travis-ci.org/BurntSushi/ripgrep)
[![Windows build status](https://ci.appveyor.com/api/projects/status/github/BurntSushi/ripgrep?svg=true)](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[![](https://img.shields.io/crates/v/grep.svg)](https://crates.io/crates/grep)
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
### Documentation
[https://docs.rs/grep](https://docs.rs/grep)
NOTE: This crate isn't ready for wide use yet. Ambitious individuals can
probably piece together the parts, but there is no high level documentation
describing how all of the pieces fit together.
### Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
grep = "0.2"
```
and this to your crate root:
```rust
extern crate grep;
```
### Features
This crate provides a `pcre2` feature (disabled by default) which, when
enabled, re-exports the `grep-pcre2` crate as an alternative `Matcher`
implementation to the standard `grep-regex` implementation.

24
crates/grep/UNLICENSE Normal file

@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>

72
crates/grep/examples/simplegrep.rs Normal file

@@ -0,0 +1,72 @@
extern crate grep;
extern crate termcolor;
extern crate walkdir;
use std::env;
use std::error::Error;
use std::ffi::OsString;
use std::process;
use grep::cli;
use grep::printer::{ColorSpecs, StandardBuilder};
use grep::regex::RegexMatcher;
use grep::searcher::{BinaryDetection, SearcherBuilder};
use termcolor::ColorChoice;
use walkdir::WalkDir;
fn main() {
if let Err(err) = try_main() {
eprintln!("{}", err);
process::exit(1);
}
}
fn try_main() -> Result<(), Box<dyn Error>> {
let mut args: Vec<OsString> = env::args_os().collect();
if args.len() < 2 {
return Err("Usage: simplegrep <pattern> [<path> ...]".into());
}
if args.len() == 2 {
args.push(OsString::from("./"));
}
search(cli::pattern_from_os(&args[1])?, &args[2..])
}
fn search(pattern: &str, paths: &[OsString]) -> Result<(), Box<dyn Error>> {
let matcher = RegexMatcher::new_line_matcher(&pattern)?;
let mut searcher = SearcherBuilder::new()
.binary_detection(BinaryDetection::quit(b'\x00'))
.line_number(false)
.build();
let mut printer = StandardBuilder::new()
.color_specs(ColorSpecs::default_with_color())
.build(cli::stdout(if cli::is_tty_stdout() {
ColorChoice::Auto
} else {
ColorChoice::Never
}));
for path in paths {
for result in WalkDir::new(path) {
let dent = match result {
Ok(dent) => dent,
Err(err) => {
eprintln!("{}", err);
continue;
}
};
if !dent.file_type().is_file() {
continue;
}
let result = searcher.search_path(
&matcher,
dent.path(),
printer.sink_with_path(&matcher, dent.path()),
);
if let Err(err) = result {
eprintln!("{}: {}", dent.path().display(), err);
}
}
}
Ok(())
}

23
crates/grep/src/lib.rs Normal file

@@ -0,0 +1,23 @@
/*!
ripgrep, as a library.
This library is intended to provide a high level facade to the crates that
make up ripgrep's core searching routines. However, there is no high level
documentation available yet guiding users on how to fit all of the pieces
together.
Every public API item in the constituent crates is documented, but examples
are sparse.
A cookbook and a guide are planned.
*/
#![deny(missing_docs)]
pub extern crate grep_cli as cli;
pub extern crate grep_matcher as matcher;
#[cfg(feature = "pcre2")]
pub extern crate grep_pcre2 as pcre2;
pub extern crate grep_printer as printer;
pub extern crate grep_regex as regex;
pub extern crate grep_searcher as searcher;

3
crates/ignore/COPYING Normal file

@@ -0,0 +1,3 @@
This project is dual-licensed under the Unlicense and MIT licenses.
You may use this code under the terms of either license.

36
crates/ignore/Cargo.toml Normal file

@@ -0,0 +1,36 @@
[package]
name = "ignore"
version = "0.4.11" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
A fast library for efficiently matching ignore files such as `.gitignore`
against file paths.
"""
documentation = "https://docs.rs/ignore"
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/ignore"
repository = "https://github.com/BurntSushi/ripgrep/tree/master/ignore"
readme = "README.md"
keywords = ["glob", "ignore", "gitignore", "pattern", "file"]
license = "Unlicense/MIT"
[lib]
name = "ignore"
bench = false
[dependencies]
crossbeam-channel = "0.4.0"
crossbeam-utils = "0.7.0"
globset = { version = "0.4.3", path = "../globset" }
lazy_static = "1.1"
log = "0.4.5"
memchr = "2.1"
regex = "1.1"
same-file = "1.0.4"
thread_local = "1"
walkdir = "2.2.7"
[target.'cfg(windows)'.dependencies.winapi-util]
version = "0.1.2"
[features]
simd-accel = ["globset/simd-accel"]

21
crates/ignore/LICENSE-MIT Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Andrew Gallant
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

66
crates/ignore/README.md Normal file

@@ -0,0 +1,66 @@
ignore
======
The ignore crate provides a fast recursive directory iterator that respects
various filters such as globs, file types and `.gitignore` files. This crate
also provides lower level direct access to gitignore and file type matchers.
[![Linux build status](https://api.travis-ci.org/BurntSushi/ripgrep.svg)](https://travis-ci.org/BurntSushi/ripgrep)
[![Windows build status](https://ci.appveyor.com/api/projects/status/github/BurntSushi/ripgrep?svg=true)](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[![](https://img.shields.io/crates/v/ignore.svg)](https://crates.io/crates/ignore)
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
### Documentation
[https://docs.rs/ignore](https://docs.rs/ignore)
### Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
ignore = "0.4"
```
and this to your crate root:
```rust
extern crate ignore;
```
### Example
This example shows the most basic usage of this crate. This code will
recursively traverse the current directory while automatically filtering out
files and directories according to ignore globs found in files like
`.ignore` and `.gitignore`:
```rust,no_run
use ignore::Walk;
for result in Walk::new("./") {
// Each item yielded by the iterator is either a directory entry or an
// error, so either print the path or the error.
match result {
Ok(entry) => println!("{}", entry.path().display()),
Err(err) => println!("ERROR: {}", err),
}
}
```
### Example: advanced
By default, the recursive directory iterator will ignore hidden files and
directories. This can be disabled by building the iterator with `WalkBuilder`:
```rust,no_run
use ignore::WalkBuilder;
for result in WalkBuilder::new("./").hidden(false).build() {
println!("{:?}", result);
}
```
See the documentation for `WalkBuilder` for many other options.

24
crates/ignore/UNLICENSE Normal file

@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>

84
crates/ignore/examples/walk.rs Normal file

@@ -0,0 +1,84 @@
extern crate crossbeam_channel as channel;
extern crate ignore;
extern crate walkdir;
use std::env;
use std::io::{self, Write};
use std::path::Path;
use std::thread;
use ignore::WalkBuilder;
use walkdir::WalkDir;
fn main() {
let mut path = env::args().nth(1).unwrap();
let mut parallel = false;
let mut simple = false;
let (tx, rx) = channel::bounded::<DirEntry>(100);
if path == "parallel" {
path = env::args().nth(2).unwrap();
parallel = true;
} else if path == "walkdir" {
path = env::args().nth(2).unwrap();
simple = true;
}
let stdout_thread = thread::spawn(move || {
let mut stdout = io::BufWriter::new(io::stdout());
for dent in rx {
write_path(&mut stdout, dent.path());
}
});
if parallel {
let walker = WalkBuilder::new(path).threads(6).build_parallel();
walker.run(|| {
let tx = tx.clone();
Box::new(move |result| {
use ignore::WalkState::*;
tx.send(DirEntry::Y(result.unwrap())).unwrap();
Continue
})
});
} else if simple {
let walker = WalkDir::new(path);
for result in walker {
tx.send(DirEntry::X(result.unwrap())).unwrap();
}
} else {
let walker = WalkBuilder::new(path).build();
for result in walker {
tx.send(DirEntry::Y(result.unwrap())).unwrap();
}
}
drop(tx);
stdout_thread.join().unwrap();
}
enum DirEntry {
X(walkdir::DirEntry),
Y(ignore::DirEntry),
}
impl DirEntry {
fn path(&self) -> &Path {
match *self {
DirEntry::X(ref x) => x.path(),
DirEntry::Y(ref y) => y.path(),
}
}
}
#[cfg(unix)]
fn write_path<W: Write>(mut wtr: W, path: &Path) {
use std::os::unix::ffi::OsStrExt;
wtr.write(path.as_os_str().as_bytes()).unwrap();
wtr.write(b"\n").unwrap();
}
#[cfg(not(unix))]
fn write_path<W: Write>(mut wtr: W, path: &Path) {
wtr.write(path.to_string_lossy().as_bytes()).unwrap();
wtr.write(b"\n").unwrap();
}

244
crates/ignore/src/default_types.rs Normal file

@@ -0,0 +1,244 @@
/// This list represents the default file types that ripgrep ships with. In
/// general, any file format is fair game, although it should generally be
/// limited to reasonably popular open formats. For other cases, you can add
/// types to each invocation of ripgrep with the '--type-add' flag.
///
/// If you would like to add or improve this list, please file a PR:
/// https://github.com/BurntSushi/ripgrep
///
/// Please try to keep this list sorted lexicographically and wrapped to 79
/// columns (inclusive).
#[rustfmt::skip]
pub const DEFAULT_TYPES: &[(&str, &[&str])] = &[
("agda", &["*.agda", "*.lagda"]),
("aidl", &["*.aidl"]),
("amake", &["*.mk", "*.bp"]),
("asciidoc", &["*.adoc", "*.asc", "*.asciidoc"]),
("asm", &["*.asm", "*.s", "*.S"]),
("asp", &[
"*.aspx", "*.aspx.cs", "*.aspx.cs", "*.ascx", "*.ascx.cs", "*.ascx.vb",
]),
("ats", &["*.ats", "*.dats", "*.sats", "*.hats"]),
("avro", &["*.avdl", "*.avpr", "*.avsc"]),
("awk", &["*.awk"]),
("bazel", &["*.bzl", "WORKSPACE", "BUILD", "BUILD.bazel"]),
("bitbake", &["*.bb", "*.bbappend", "*.bbclass", "*.conf", "*.inc"]),
("brotli", &["*.br"]),
("buildstream", &["*.bst"]),
("bzip2", &["*.bz2", "*.tbz2"]),
("c", &["*.[chH]", "*.[chH].in", "*.cats"]),
("cabal", &["*.cabal"]),
("cbor", &["*.cbor"]),
("ceylon", &["*.ceylon"]),
("clojure", &["*.clj", "*.cljc", "*.cljs", "*.cljx"]),
("cmake", &["*.cmake", "CMakeLists.txt"]),
("coffeescript", &["*.coffee"]),
("config", &["*.cfg", "*.conf", "*.config", "*.ini"]),
("cpp", &[
"*.[ChH]", "*.cc", "*.[ch]pp", "*.[ch]xx", "*.hh", "*.inl",
"*.[ChH].in", "*.cc.in", "*.[ch]pp.in", "*.[ch]xx.in", "*.hh.in",
]),
("creole", &["*.creole"]),
("crystal", &["Projectfile", "*.cr"]),
("cs", &["*.cs"]),
("csharp", &["*.cs"]),
("cshtml", &["*.cshtml"]),
("css", &["*.css", "*.scss"]),
("csv", &["*.csv"]),
("cython", &["*.pyx", "*.pxi", "*.pxd"]),
("d", &["*.d"]),
("dart", &["*.dart"]),
("dhall", &["*.dhall"]),
("diff", &["*.patch", "*.diff"]),
("docker", &["*Dockerfile*"]),
("edn", &["*.edn"]),
("elisp", &["*.el"]),
("elixir", &["*.ex", "*.eex", "*.exs"]),
("elm", &["*.elm"]),
("erb", &["*.erb"]),
("erlang", &["*.erl", "*.hrl"]),
("fidl", &["*.fidl"]),
("fish", &["*.fish"]),
("fortran", &[
"*.f", "*.F", "*.f77", "*.F77", "*.pfo",
"*.f90", "*.F90", "*.f95", "*.F95",
]),
("fsharp", &["*.fs", "*.fsx", "*.fsi"]),
("gap", &["*.g", "*.gap", "*.gi", "*.gd", "*.tst"]),
("gn", &["*.gn", "*.gni"]),
("go", &["*.go"]),
("gradle", &["*.gradle"]),
("groovy", &["*.groovy", "*.gradle"]),
("gzip", &["*.gz", "*.tgz"]),
("h", &["*.h", "*.hpp"]),
("haml", &["*.haml"]),
("haskell", &["*.hs", "*.lhs", "*.cpphs", "*.c2hs", "*.hsc"]),
("hbs", &["*.hbs"]),
("hs", &["*.hs", "*.lhs"]),
("html", &["*.htm", "*.html", "*.ejs"]),
("idris", &["*.idr", "*.lidr"]),
("java", &["*.java", "*.jsp", "*.jspx", "*.properties"]),
("jinja", &["*.j2", "*.jinja", "*.jinja2"]),
("jl", &["*.jl"]),
("js", &["*.js", "*.jsx", "*.vue"]),
("json", &["*.json", "composer.lock"]),
("jsonl", &["*.jsonl"]),
("julia", &["*.jl"]),
("jupyter", &["*.ipynb", "*.jpynb"]),
("kotlin", &["*.kt", "*.kts"]),
("less", &["*.less"]),
("license", &[
// General
"COPYING", "COPYING[.-]*",
"COPYRIGHT", "COPYRIGHT[.-]*",
"EULA", "EULA[.-]*",
"licen[cs]e", "licen[cs]e.*",
"LICEN[CS]E", "LICEN[CS]E[.-]*", "*[.-]LICEN[CS]E*",
"NOTICE", "NOTICE[.-]*",
"PATENTS", "PATENTS[.-]*",
"UNLICEN[CS]E", "UNLICEN[CS]E[.-]*",
// GPL (gpl.txt, etc.)
"agpl[.-]*",
"gpl[.-]*",
"lgpl[.-]*",
// Other license-specific (APACHE-2.0.txt, etc.)
"AGPL-*[0-9]*",
"APACHE-*[0-9]*",
"BSD-*[0-9]*",
"CC-BY-*",
"GFDL-*[0-9]*",
"GNU-*[0-9]*",
"GPL-*[0-9]*",
"LGPL-*[0-9]*",
"MIT-*[0-9]*",
"MPL-*[0-9]*",
"OFL-*[0-9]*",
]),
("lisp", &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]),
("lock", &["*.lock", "package-lock.json"]),
("log", &["*.log"]),
("lua", &["*.lua"]),
("lz4", &["*.lz4"]),
("lzma", &["*.lzma"]),
("m4", &["*.ac", "*.m4"]),
("make", &[
"[Gg][Nn][Uu]makefile", "[Mm]akefile",
"[Gg][Nn][Uu]makefile.am", "[Mm]akefile.am",
"[Gg][Nn][Uu]makefile.in", "[Mm]akefile.in",
"*.mk", "*.mak"
]),
("mako", &["*.mako", "*.mao"]),
("man", &["*.[0-9lnpx]", "*.[0-9][cEFMmpSx]"]),
("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
("matlab", &["*.m"]),
("md", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
("mk", &["mkfile"]),
("ml", &["*.ml"]),
("msbuild", &[
"*.csproj", "*.fsproj", "*.vcxproj", "*.proj", "*.props", "*.targets",
]),
("nim", &["*.nim", "*.nimf", "*.nimble", "*.nims"]),
("nix", &["*.nix"]),
("objc", &["*.h", "*.m"]),
("objcpp", &["*.h", "*.mm"]),
("ocaml", &["*.ml", "*.mli", "*.mll", "*.mly"]),
("org", &["*.org", "*.org_archive"]),
("pascal", &["*.pas", "*.dpr", "*.lpr", "*.pp", "*.inc"]),
("pdf", &["*.pdf"]),
("perl", &["*.perl", "*.pl", "*.PL", "*.plh", "*.plx", "*.pm", "*.t"]),
("php", &["*.php", "*.php3", "*.php4", "*.php5", "*.phtml"]),
("pod", &["*.pod"]),
("postscript", &["*.eps", "*.ps"]),
("protobuf", &["*.proto"]),
("ps", &["*.cdxml", "*.ps1", "*.ps1xml", "*.psd1", "*.psm1"]),
("puppet", &["*.erb", "*.pp", "*.rb"]),
("purs", &["*.purs"]),
("py", &["*.py"]),
("qmake", &["*.pro", "*.pri", "*.prf"]),
("qml", &["*.qml"]),
("r", &["*.R", "*.r", "*.Rmd", "*.Rnw"]),
("rdoc", &["*.rdoc"]),
("readme", &["README*", "*README"]),
("robot", &["*.robot"]),
("rst", &["*.rst"]),
("ruby", &["Gemfile", "*.gemspec", ".irbrc", "Rakefile", "*.rb"]),
("rust", &["*.rs"]),
("sass", &["*.sass", "*.scss"]),
("scala", &["*.scala", "*.sbt"]),
("sh", &[
// Portable/misc. init files
".login", ".logout", ".profile", "profile",
// bash-specific init files
".bash_login", "bash_login",
".bash_logout", "bash_logout",
".bash_profile", "bash_profile",
".bashrc", "bashrc", "*.bashrc",
// csh-specific init files
".cshrc", "*.cshrc",
// ksh-specific init files
".kshrc", "*.kshrc",
// tcsh-specific init files
".tcshrc",
// zsh-specific init files
".zshenv", "zshenv",
".zlogin", "zlogin",
".zlogout", "zlogout",
".zprofile", "zprofile",
".zshrc", "zshrc",
// Extensions
"*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh", "*.zsh",
]),
("slim", &["*.skim", "*.slim", "*.slime"]),
("smarty", &["*.tpl"]),
("sml", &["*.sml", "*.sig"]),
("soy", &["*.soy"]),
("spark", &["*.spark"]),
("spec", &["*.spec"]),
("sql", &["*.sql", "*.psql"]),
("stylus", &["*.styl"]),
("sv", &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]),
("svg", &["*.svg"]),
("swift", &["*.swift"]),
("swig", &["*.def", "*.i"]),
("systemd", &[
"*.automount", "*.conf", "*.device", "*.link", "*.mount", "*.path",
"*.scope", "*.service", "*.slice", "*.socket", "*.swap", "*.target",
"*.timer",
]),
("taskpaper", &["*.taskpaper"]),
("tcl", &["*.tcl"]),
("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib", "*.dtx", "*.ins"]),
("textile", &["*.textile"]),
("tf", &["*.tf"]),
("thrift", &["*.thrift"]),
("toml", &["*.toml", "Cargo.lock"]),
("ts", &["*.ts", "*.tsx"]),
("twig", &["*.twig"]),
("txt", &["*.txt"]),
("typoscript", &["*.typoscript", "*.ts"]),
("vala", &["*.vala"]),
("vb", &["*.vb"]),
("verilog", &["*.v", "*.vh", "*.sv", "*.svh"]),
("vhdl", &["*.vhd", "*.vhdl"]),
("vim", &["*.vim"]),
("vimscript", &["*.vim"]),
("webidl", &["*.idl", "*.webidl", "*.widl"]),
("wiki", &["*.mediawiki", "*.wiki"]),
("xml", &[
"*.xml", "*.xml.dist", "*.dtd", "*.xsl", "*.xslt", "*.xsd", "*.xjb",
"*.rng", "*.sch", "*.xhtml",
]),
("xz", &["*.xz", "*.txz"]),
("yacc", &["*.y"]),
("yaml", &["*.yaml", "*.yml"]),
("zig", &["*.zig"]),
("zsh", &[
".zshenv", "zshenv",
".zlogin", "zlogin",
".zlogout", "zlogout",
".zprofile", "zprofile",
".zshrc", "zshrc",
"*.zsh",
]),
("zstd", &["*.zst", "*.zstd"]),
];
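
This table is consumed through the crate's file-type matcher. A hedged sketch of typical downstream use, assuming the `ignore` crate's public `types::TypesBuilder` API (not shown in this diff), where `add_defaults` loads the table above:

```rust
use ignore::types::TypesBuilder;

fn main() {
    let mut builder = TypesBuilder::new();
    // Load DEFAULT_TYPES into the builder.
    builder.add_defaults();
    // Restrict matching to Rust files, mirroring `rg --type rust`.
    builder.select("rust");
    let types = builder.build().unwrap();
    assert!(types.matched("main.rs", false).is_whitelist());
    assert!(types.matched("main.py", false).is_ignore());
}
```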

1200
crates/ignore/src/dir.rs Normal file

File diff suppressed because it is too large

786
crates/ignore/src/gitignore.rs Normal file

@@ -0,0 +1,786 @@
/*!
The gitignore module provides a way to match globs from a gitignore file
against file paths.
Note that this module implements the specification as described in the
`gitignore` man page from scratch. That is, this module does *not* shell out to
the `git` command line tool.
*/
use std::cell::RefCell;
use std::env;
use std::fs::File;
use std::io::{self, BufRead, Read};
use std::path::{Path, PathBuf};
use std::str;
use std::sync::Arc;
use globset::{Candidate, GlobBuilder, GlobSet, GlobSetBuilder};
use regex::bytes::Regex;
use thread_local::ThreadLocal;
use pathutil::{is_file_name, strip_prefix};
use {Error, Match, PartialErrorBuilder};
/// Glob represents a single glob in a gitignore file.
///
/// This is used to report information about the highest precedent glob that
/// matched in one or more gitignore files.
#[derive(Clone, Debug)]
pub struct Glob {
/// The file path that this glob was extracted from.
from: Option<PathBuf>,
/// The original glob string.
original: String,
/// The actual glob string used to convert to a regex.
actual: String,
/// Whether this is a whitelisted glob or not.
is_whitelist: bool,
/// Whether this glob should only match directories or not.
is_only_dir: bool,
}
impl Glob {
/// Returns the file path that defined this glob.
pub fn from(&self) -> Option<&Path> {
self.from.as_ref().map(|p| &**p)
}
/// The original glob as it was defined in a gitignore file.
pub fn original(&self) -> &str {
&self.original
}
/// The actual glob that was compiled to respect gitignore
/// semantics.
pub fn actual(&self) -> &str {
&self.actual
}
/// Whether this was a whitelisted glob or not.
pub fn is_whitelist(&self) -> bool {
self.is_whitelist
}
/// Whether this glob must match a directory or not.
pub fn is_only_dir(&self) -> bool {
self.is_only_dir
}
/// Returns true if and only if this glob has a `**/` prefix.
fn has_doublestar_prefix(&self) -> bool {
self.actual.starts_with("**/") || self.actual == "**"
}
}
/// Gitignore is a matcher for the globs in one or more gitignore files
/// in the same directory.
#[derive(Clone, Debug)]
pub struct Gitignore {
set: GlobSet,
root: PathBuf,
globs: Vec<Glob>,
num_ignores: u64,
num_whitelists: u64,
matches: Option<Arc<ThreadLocal<RefCell<Vec<usize>>>>>,
}
impl Gitignore {
/// Creates a new gitignore matcher from the gitignore file path given.
///
/// If it's desirable to include multiple gitignore files in a single
/// matcher, or read gitignore globs from a different source, then
/// use `GitignoreBuilder`.
///
/// This always returns a valid matcher, even if it's empty. In particular,
/// a Gitignore file can be partially valid, e.g., when one glob is invalid
/// but the rest aren't.
///
/// Note that I/O errors are ignored. For more granular control over
/// errors, use `GitignoreBuilder`.
pub fn new<P: AsRef<Path>>(
gitignore_path: P,
) -> (Gitignore, Option<Error>) {
let path = gitignore_path.as_ref();
let parent = path.parent().unwrap_or(Path::new("/"));
let mut builder = GitignoreBuilder::new(parent);
let mut errs = PartialErrorBuilder::default();
errs.maybe_push_ignore_io(builder.add(path));
match builder.build() {
Ok(gi) => (gi, errs.into_error_option()),
Err(err) => {
errs.push(err);
(Gitignore::empty(), errs.into_error_option())
}
}
}
/// Creates a new gitignore matcher from the global ignore file, if one
/// exists.
///
/// The global config file path is specified by git's `core.excludesFile`
/// config option.
///
/// Git's config file location is `$HOME/.gitconfig`. If `$HOME/.gitconfig`
/// does not exist or does not specify `core.excludesFile`, then
/// `$XDG_CONFIG_HOME/git/ignore` is read. If `$XDG_CONFIG_HOME` is not
/// set or is empty, then `$HOME/.config/git/ignore` is used instead.
pub fn global() -> (Gitignore, Option<Error>) {
GitignoreBuilder::new("").build_global()
}
/// Creates a new empty gitignore matcher that never matches anything.
///
/// Its path is empty.
pub fn empty() -> Gitignore {
Gitignore {
set: GlobSet::empty(),
root: PathBuf::from(""),
globs: vec![],
num_ignores: 0,
num_whitelists: 0,
matches: None,
}
}
/// Returns the directory containing this gitignore matcher.
///
/// All matches are done relative to this path.
pub fn path(&self) -> &Path {
&*self.root
}
/// Returns true if and only if this gitignore has zero globs, and
/// therefore never matches any file path.
pub fn is_empty(&self) -> bool {
self.set.is_empty()
}
/// Returns the total number of globs, which should be equivalent to
/// `num_ignores + num_whitelists`.
pub fn len(&self) -> usize {
self.set.len()
}
/// Returns the total number of ignore globs.
pub fn num_ignores(&self) -> u64 {
self.num_ignores
}
/// Returns the total number of whitelisted globs.
pub fn num_whitelists(&self) -> u64 {
self.num_whitelists
}
/// Returns whether the given path (file or directory) matched a pattern in
/// this gitignore matcher.
///
/// `is_dir` should be true if the path refers to a directory and false
/// otherwise.
///
/// The given path is matched relative to the path given when building
/// the matcher. Specifically, before matching `path`, its prefix (as
/// determined by a common suffix of the directory containing this
/// gitignore) is stripped. If there is no common suffix/prefix overlap,
/// then `path` is assumed to be relative to this matcher.
pub fn matched<P: AsRef<Path>>(
&self,
path: P,
is_dir: bool,
) -> Match<&Glob> {
if self.is_empty() {
return Match::None;
}
self.matched_stripped(self.strip(path.as_ref()), is_dir)
}
/// Returns whether the given path (file or directory, and expected to be
/// under the root) or any of its parent directories (up to the root)
/// matched a pattern in this gitignore matcher.
///
    /// NOTE: This method is more expensive than walking the directory
    /// hierarchy top-to-bottom and matching the entries. But it is easier
    /// to use in cases where a list of paths is available without a
    /// hierarchy.
///
/// `is_dir` should be true if the path refers to a directory and false
/// otherwise.
///
/// The given path is matched relative to the path given when building
/// the matcher. Specifically, before matching `path`, its prefix (as
/// determined by a common suffix of the directory containing this
/// gitignore) is stripped. If there is no common suffix/prefix overlap,
/// then `path` is assumed to be relative to this matcher.
///
/// # Panics
///
/// This method panics if the given file path is not under the root path
/// of this matcher.
pub fn matched_path_or_any_parents<P: AsRef<Path>>(
&self,
path: P,
is_dir: bool,
) -> Match<&Glob> {
if self.is_empty() {
return Match::None;
}
let mut path = self.strip(path.as_ref());
assert!(!path.has_root(), "path is expected to be under the root");
match self.matched_stripped(path, is_dir) {
Match::None => (), // walk up
a_match => return a_match,
}
while let Some(parent) = path.parent() {
match self.matched_stripped(parent, /* is_dir */ true) {
Match::None => path = parent, // walk up
a_match => return a_match,
}
}
Match::None
}
/// Like matched, but takes a path that has already been stripped.
fn matched_stripped<P: AsRef<Path>>(
&self,
path: P,
is_dir: bool,
) -> Match<&Glob> {
if self.is_empty() {
return Match::None;
}
let path = path.as_ref();
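        // `matches` is only `None` for `Gitignore::empty()`, and that case
        // was handled by the `is_empty()` check above, so the unwrap below
        // cannot panic. The thread-local scratch vec lets matching avoid
        // an allocation per call while keeping `Gitignore` shareable
        // across threads.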
let _matches = self.matches.as_ref().unwrap().get_or_default();
let mut matches = _matches.borrow_mut();
let candidate = Candidate::new(path);
self.set.matches_candidate_into(&candidate, &mut *matches);
for &i in matches.iter().rev() {
let glob = &self.globs[i];
if !glob.is_only_dir() || is_dir {
return if glob.is_whitelist() {
Match::Whitelist(glob)
} else {
Match::Ignore(glob)
};
}
}
Match::None
}
/// Strips the given path such that it's suitable for matching with this
/// gitignore matcher.
fn strip<'a, P: 'a + AsRef<Path> + ?Sized>(
&'a self,
path: &'a P,
) -> &'a Path {
let mut path = path.as_ref();
// A leading ./ is completely superfluous. We also strip it from
// our gitignore root path, so we need to strip it from our candidate
// path too.
if let Some(p) = strip_prefix("./", path) {
path = p;
}
// Strip any common prefix between the candidate path and the root
// of the gitignore, to make sure we get relative matching right.
// BUT, a file name might not have any directory components to it,
// in which case, we don't want to accidentally strip any part of the
// file name.
//
// As an additional special case, if the root is just `.`, then we
// shouldn't try to strip anything, e.g., when path begins with a `.`.
if self.root != Path::new(".") && !is_file_name(path) {
if let Some(p) = strip_prefix(&self.root, path) {
path = p;
// If we're left with a leading slash, get rid of it.
if let Some(p) = strip_prefix("/", path) {
path = p;
}
}
}
path
}
}
/// Builds a matcher for a single set of globs from a .gitignore file.
#[derive(Clone, Debug)]
pub struct GitignoreBuilder {
builder: GlobSetBuilder,
root: PathBuf,
globs: Vec<Glob>,
case_insensitive: bool,
}
impl GitignoreBuilder {
/// Create a new builder for a gitignore file.
///
/// The path given should be the path at which the globs for this gitignore
/// file should be matched. Note that paths are always matched relative
/// to the root path given here. Generally, the root path should correspond
/// to the *directory* containing a `.gitignore` file.
pub fn new<P: AsRef<Path>>(root: P) -> GitignoreBuilder {
let root = root.as_ref();
GitignoreBuilder {
builder: GlobSetBuilder::new(),
root: strip_prefix("./", root).unwrap_or(root).to_path_buf(),
globs: vec![],
case_insensitive: false,
}
}
/// Builds a new matcher from the globs added so far.
///
/// Once a matcher is built, no new globs can be added to it.
pub fn build(&self) -> Result<Gitignore, Error> {
let nignore = self.globs.iter().filter(|g| !g.is_whitelist()).count();
let nwhite = self.globs.iter().filter(|g| g.is_whitelist()).count();
let set = self
.builder
.build()
.map_err(|err| Error::Glob { glob: None, err: err.to_string() })?;
Ok(Gitignore {
set: set,
root: self.root.clone(),
globs: self.globs.clone(),
num_ignores: nignore as u64,
num_whitelists: nwhite as u64,
matches: Some(Arc::new(ThreadLocal::default())),
})
}
/// Build a global gitignore matcher using the configuration in this
/// builder.
///
/// This consumes ownership of the builder unlike `build` because it
/// must mutate the builder to add the global gitignore globs.
///
/// Note that this ignores the path given to this builder's constructor
/// and instead derives the path automatically from git's global
/// configuration.
pub fn build_global(mut self) -> (Gitignore, Option<Error>) {
match gitconfig_excludes_path() {
None => (Gitignore::empty(), None),
Some(path) => {
if !path.is_file() {
(Gitignore::empty(), None)
} else {
let mut errs = PartialErrorBuilder::default();
errs.maybe_push_ignore_io(self.add(path));
match self.build() {
Ok(gi) => (gi, errs.into_error_option()),
Err(err) => {
errs.push(err);
(Gitignore::empty(), errs.into_error_option())
}
}
}
}
}
}
/// Add each glob from the file path given.
///
/// The file given should be formatted as a `gitignore` file.
///
/// Note that partial errors can be returned. For example, if there was
/// a problem adding one glob, an error for that will be returned, but
/// all other valid globs will still be added.
pub fn add<P: AsRef<Path>>(&mut self, path: P) -> Option<Error> {
let path = path.as_ref();
let file = match File::open(path) {
Err(err) => return Some(Error::Io(err).with_path(path)),
Ok(file) => file,
};
let rdr = io::BufReader::new(file);
let mut errs = PartialErrorBuilder::default();
for (i, line) in rdr.lines().enumerate() {
let lineno = (i + 1) as u64;
let line = match line {
Ok(line) => line,
Err(err) => {
errs.push(Error::Io(err).tagged(path, lineno));
break;
}
};
if let Err(err) = self.add_line(Some(path.to_path_buf()), &line) {
errs.push(err.tagged(path, lineno));
}
}
errs.into_error_option()
}
/// Add each glob line from the string given.
///
/// If this string came from a particular `gitignore` file, then its path
/// should be provided here.
///
/// The string given should be formatted as a `gitignore` file.
#[cfg(test)]
fn add_str(
&mut self,
from: Option<PathBuf>,
gitignore: &str,
) -> Result<&mut GitignoreBuilder, Error> {
for line in gitignore.lines() {
self.add_line(from.clone(), line)?;
}
Ok(self)
}
/// Add a line from a gitignore file to this builder.
///
/// If this line came from a particular `gitignore` file, then its path
/// should be provided here.
///
/// If the line could not be parsed as a glob, then an error is returned.
pub fn add_line(
&mut self,
from: Option<PathBuf>,
mut line: &str,
) -> Result<&mut GitignoreBuilder, Error> {
#![allow(deprecated)]
if line.starts_with("#") {
return Ok(self);
}
if !line.ends_with("\\ ") {
line = line.trim_right();
}
if line.is_empty() {
return Ok(self);
}
let mut glob = Glob {
from: from,
original: line.to_string(),
actual: String::new(),
is_whitelist: false,
is_only_dir: false,
};
let mut is_absolute = false;
if line.starts_with("\\!") || line.starts_with("\\#") {
line = &line[1..];
is_absolute = line.chars().nth(0) == Some('/');
} else {
if line.starts_with("!") {
glob.is_whitelist = true;
line = &line[1..];
}
if line.starts_with("/") {
// `man gitignore` says that if a glob starts with a slash,
// then the glob can only match the beginning of a path
// (relative to the location of gitignore). We achieve this by
// simply banning wildcards from matching /.
line = &line[1..];
is_absolute = true;
}
}
// If it ends with a slash, then this should only match directories,
// but the slash should otherwise not be used while globbing.
if let Some((i, c)) = line.char_indices().rev().nth(0) {
if c == '/' {
glob.is_only_dir = true;
line = &line[..i];
}
}
glob.actual = line.to_string();
// If there is a literal slash, then this is a glob that must match the
// entire path name. Otherwise, we should let it match anywhere, so use
// a **/ prefix.
if !is_absolute && !line.chars().any(|c| c == '/') {
// ... but only if we don't already have a **/ prefix.
if !glob.has_doublestar_prefix() {
glob.actual = format!("**/{}", glob.actual);
}
}
// If the glob ends with `/**`, then we should only match everything
// inside a directory, but not the directory itself. Standard globs
// will match the directory. So we add `/*` to force the issue.
if glob.actual.ends_with("/**") {
glob.actual = format!("{}/*", glob.actual);
}
let parsed = GlobBuilder::new(&glob.actual)
.literal_separator(true)
.case_insensitive(self.case_insensitive)
.backslash_escape(true)
.build()
.map_err(|err| Error::Glob {
glob: Some(glob.original.clone()),
err: err.kind().to_string(),
})?;
self.builder.add(parsed);
self.globs.push(glob);
Ok(self)
}
/// Toggle whether the globs should be matched case insensitively or not.
///
/// When this option is changed, only globs added after the change will be
/// affected.
///
/// This is disabled by default.
pub fn case_insensitive(
&mut self,
yes: bool,
) -> Result<&mut GitignoreBuilder, Error> {
// TODO: This should not return a `Result`. Fix this in the next semver
// release.
self.case_insensitive = yes;
Ok(self)
}
}
/// Return the file path of the current environment's global gitignore file.
///
/// Note that the file path returned may not exist.
fn gitconfig_excludes_path() -> Option<PathBuf> {
// git supports $HOME/.gitconfig and $XDG_CONFIG_HOME/git/config. Notably,
// both can be active at the same time, where $HOME/.gitconfig takes
    // precedence. So if $HOME/.gitconfig defines a `core.excludesFile`, then
// we're done.
match gitconfig_home_contents().and_then(|x| parse_excludes_file(&x)) {
Some(path) => return Some(path),
None => {}
}
match gitconfig_xdg_contents().and_then(|x| parse_excludes_file(&x)) {
Some(path) => return Some(path),
None => {}
}
excludes_file_default()
}
/// Returns the file contents of git's global config file, if one exists, in
/// the user's home directory.
fn gitconfig_home_contents() -> Option<Vec<u8>> {
let home = match home_dir() {
None => return None,
Some(home) => home,
};
let mut file = match File::open(home.join(".gitconfig")) {
Err(_) => return None,
Ok(file) => io::BufReader::new(file),
};
let mut contents = vec![];
file.read_to_end(&mut contents).ok().map(|_| contents)
}
/// Returns the file contents of git's global config file, if one exists, in
/// the user's XDG_CONFIG_HOME directory.
fn gitconfig_xdg_contents() -> Option<Vec<u8>> {
let path = env::var_os("XDG_CONFIG_HOME")
.and_then(|x| if x.is_empty() { None } else { Some(PathBuf::from(x)) })
.or_else(|| home_dir().map(|p| p.join(".config")))
.map(|x| x.join("git/config"));
let mut file = match path.and_then(|p| File::open(p).ok()) {
None => return None,
Some(file) => io::BufReader::new(file),
};
let mut contents = vec![];
file.read_to_end(&mut contents).ok().map(|_| contents)
}
/// Returns the default file path for a global .gitignore file.
///
/// Specifically, this respects XDG_CONFIG_HOME.
fn excludes_file_default() -> Option<PathBuf> {
env::var_os("XDG_CONFIG_HOME")
.and_then(|x| if x.is_empty() { None } else { Some(PathBuf::from(x)) })
.or_else(|| home_dir().map(|p| p.join(".config")))
.map(|x| x.join("git/ignore"))
}
/// Extract git's `core.excludesfile` config setting from the raw file contents
/// given.
fn parse_excludes_file(data: &[u8]) -> Option<PathBuf> {
// N.B. This is the lazy approach, and isn't technically correct, but
// probably works in more circumstances. I guess we would ideally have
// a full INI parser. Yuck.
lazy_static! {
static ref RE: Regex =
Regex::new(r"(?im)^\s*excludesfile\s*=\s*(.+)\s*$").unwrap();
};
let caps = match RE.captures(data) {
None => return None,
Some(caps) => caps,
};
str::from_utf8(&caps[1]).ok().map(|s| PathBuf::from(expand_tilde(s)))
}
/// Expands ~ in file paths to the value of $HOME.
fn expand_tilde(path: &str) -> String {
let home = match home_dir() {
None => return path.to_string(),
Some(home) => home.to_string_lossy().into_owned(),
};
path.replace("~", &home)
}
/// Returns the location of the user's home directory.
fn home_dir() -> Option<PathBuf> {
// We're fine with using env::home_dir for now. Its bugs are, IMO, pretty
// minor corner cases. We should still probably eventually migrate to
// the `dirs` crate to get a proper implementation.
#![allow(deprecated)]
env::home_dir()
}
#[cfg(test)]
mod tests {
use super::{Gitignore, GitignoreBuilder};
use std::path::Path;
fn gi_from_str<P: AsRef<Path>>(root: P, s: &str) -> Gitignore {
let mut builder = GitignoreBuilder::new(root);
builder.add_str(None, s).unwrap();
builder.build().unwrap()
}
macro_rules! ignored {
($name:ident, $root:expr, $gi:expr, $path:expr) => {
ignored!($name, $root, $gi, $path, false);
};
($name:ident, $root:expr, $gi:expr, $path:expr, $is_dir:expr) => {
#[test]
fn $name() {
let gi = gi_from_str($root, $gi);
assert!(gi.matched($path, $is_dir).is_ignore());
}
};
}
macro_rules! not_ignored {
($name:ident, $root:expr, $gi:expr, $path:expr) => {
not_ignored!($name, $root, $gi, $path, false);
};
($name:ident, $root:expr, $gi:expr, $path:expr, $is_dir:expr) => {
#[test]
fn $name() {
let gi = gi_from_str($root, $gi);
assert!(!gi.matched($path, $is_dir).is_ignore());
}
};
}
const ROOT: &'static str = "/home/foobar/rust/rg";
ignored!(ig1, ROOT, "months", "months");
ignored!(ig2, ROOT, "*.lock", "Cargo.lock");
ignored!(ig3, ROOT, "*.rs", "src/main.rs");
ignored!(ig4, ROOT, "src/*.rs", "src/main.rs");
ignored!(ig5, ROOT, "/*.c", "cat-file.c");
ignored!(ig6, ROOT, "/src/*.rs", "src/main.rs");
ignored!(ig7, ROOT, "!src/main.rs\n*.rs", "src/main.rs");
ignored!(ig8, ROOT, "foo/", "foo", true);
ignored!(ig9, ROOT, "**/foo", "foo");
ignored!(ig10, ROOT, "**/foo", "src/foo");
ignored!(ig11, ROOT, "**/foo/**", "src/foo/bar");
ignored!(ig12, ROOT, "**/foo/**", "wat/src/foo/bar/baz");
ignored!(ig13, ROOT, "**/foo/bar", "foo/bar");
ignored!(ig14, ROOT, "**/foo/bar", "src/foo/bar");
ignored!(ig15, ROOT, "abc/**", "abc/x");
ignored!(ig16, ROOT, "abc/**", "abc/x/y");
ignored!(ig17, ROOT, "abc/**", "abc/x/y/z");
ignored!(ig18, ROOT, "a/**/b", "a/b");
ignored!(ig19, ROOT, "a/**/b", "a/x/b");
ignored!(ig20, ROOT, "a/**/b", "a/x/y/b");
ignored!(ig21, ROOT, r"\!xy", "!xy");
ignored!(ig22, ROOT, r"\#foo", "#foo");
ignored!(ig23, ROOT, "foo", "./foo");
ignored!(ig24, ROOT, "target", "grep/target");
ignored!(ig25, ROOT, "Cargo.lock", "./tabwriter-bin/Cargo.lock");
ignored!(ig26, ROOT, "/foo/bar/baz", "./foo/bar/baz");
ignored!(ig27, ROOT, "foo/", "xyz/foo", true);
ignored!(ig28, "./src", "/llvm/", "./src/llvm", true);
ignored!(ig29, ROOT, "node_modules/ ", "node_modules", true);
ignored!(ig30, ROOT, "**/", "foo/bar", true);
ignored!(ig31, ROOT, "path1/*", "path1/foo");
ignored!(ig32, ROOT, ".a/b", ".a/b");
ignored!(ig33, "./", ".a/b", ".a/b");
ignored!(ig34, ".", ".a/b", ".a/b");
ignored!(ig35, "./.", ".a/b", ".a/b");
ignored!(ig36, "././", ".a/b", ".a/b");
ignored!(ig37, "././.", ".a/b", ".a/b");
ignored!(ig38, ROOT, "\\[", "[");
ignored!(ig39, ROOT, "\\?", "?");
ignored!(ig40, ROOT, "\\*", "*");
ignored!(ig41, ROOT, "\\a", "a");
ignored!(ig42, ROOT, "s*.rs", "sfoo.rs");
ignored!(ig43, ROOT, "**", "foo.rs");
ignored!(ig44, ROOT, "**/**/*", "a/foo.rs");
not_ignored!(ignot1, ROOT, "amonths", "months");
not_ignored!(ignot2, ROOT, "monthsa", "months");
not_ignored!(ignot3, ROOT, "/src/*.rs", "src/grep/src/main.rs");
not_ignored!(ignot4, ROOT, "/*.c", "mozilla-sha1/sha1.c");
not_ignored!(ignot5, ROOT, "/src/*.rs", "src/grep/src/main.rs");
not_ignored!(ignot6, ROOT, "*.rs\n!src/main.rs", "src/main.rs");
not_ignored!(ignot7, ROOT, "foo/", "foo", false);
not_ignored!(ignot8, ROOT, "**/foo/**", "wat/src/afoo/bar/baz");
not_ignored!(ignot9, ROOT, "**/foo/**", "wat/src/fooa/bar/baz");
not_ignored!(ignot10, ROOT, "**/foo/bar", "foo/src/bar");
not_ignored!(ignot11, ROOT, "#foo", "#foo");
not_ignored!(ignot12, ROOT, "\n\n\n", "foo");
not_ignored!(ignot13, ROOT, "foo/**", "foo", true);
not_ignored!(
ignot14,
"./third_party/protobuf",
"m4/ltoptions.m4",
"./third_party/protobuf/csharp/src/packages/repositories.config"
);
not_ignored!(ignot15, ROOT, "!/bar", "foo/bar");
not_ignored!(ignot16, ROOT, "*\n!**/", "foo", true);
not_ignored!(ignot17, ROOT, "src/*.rs", "src/grep/src/main.rs");
not_ignored!(ignot18, ROOT, "path1/*", "path2/path1/foo");
not_ignored!(ignot19, ROOT, "s*.rs", "src/foo.rs");
fn bytes(s: &str) -> Vec<u8> {
s.to_string().into_bytes()
}
fn path_string<P: AsRef<Path>>(path: P) -> String {
path.as_ref().to_str().unwrap().to_string()
}
#[test]
fn parse_excludes_file1() {
let data = bytes("[core]\nexcludesFile = /foo/bar");
let got = super::parse_excludes_file(&data).unwrap();
assert_eq!(path_string(got), "/foo/bar");
}
#[test]
fn parse_excludes_file2() {
let data = bytes("[core]\nexcludesFile = ~/foo/bar");
let got = super::parse_excludes_file(&data).unwrap();
assert_eq!(path_string(got), super::expand_tilde("~/foo/bar"));
}
#[test]
fn parse_excludes_file3() {
let data = bytes("[core]\nexcludeFile = /foo/bar");
assert!(super::parse_excludes_file(&data).is_none());
}
// See: https://github.com/BurntSushi/ripgrep/issues/106
#[test]
fn regression_106() {
gi_from_str("/", " ");
}
#[test]
fn case_insensitive() {
let gi = GitignoreBuilder::new(ROOT)
.case_insensitive(true)
.unwrap()
.add_str(None, "*.html")
.unwrap()
.build()
.unwrap();
assert!(gi.matched("foo.html", false).is_ignore());
assert!(gi.matched("foo.HTML", false).is_ignore());
assert!(!gi.matched("foo.htm", false).is_ignore());
assert!(!gi.matched("foo.HTM", false).is_ignore());
}
ignored!(cs1, ROOT, "*.html", "foo.html");
not_ignored!(cs2, ROOT, "*.html", "foo.HTML");
not_ignored!(cs3, ROOT, "*.html", "foo.htm");
not_ignored!(cs4, ROOT, "*.html", "foo.HTM");
}

crates/ignore/src/lib.rs Normal file

@@ -0,0 +1,499 @@
/*!
The ignore crate provides a fast recursive directory iterator that respects
various filters such as globs, file types and `.gitignore` files. The precise
matching rules and precedence are explained in the documentation for
`WalkBuilder`.
Secondarily, this crate exposes gitignore and file type matchers for use cases
that demand more fine-grained control.
# Example
This example shows the most basic usage of this crate. This code will
recursively traverse the current directory while automatically filtering out
files and directories according to ignore globs found in files like
`.ignore` and `.gitignore`:
```rust,no_run
use ignore::Walk;
for result in Walk::new("./") {
// Each item yielded by the iterator is either a directory entry or an
// error, so either print the path or the error.
match result {
Ok(entry) => println!("{}", entry.path().display()),
Err(err) => println!("ERROR: {}", err),
}
}
```
# Example: advanced
By default, the recursive directory iterator will ignore hidden files and
directories. This can be disabled by building the iterator with `WalkBuilder`:
```rust,no_run
use ignore::WalkBuilder;
for result in WalkBuilder::new("./").hidden(false).build() {
println!("{:?}", result);
}
```
See the documentation for `WalkBuilder` for many other options.
*/
#![deny(missing_docs)]
extern crate crossbeam_channel as channel;
extern crate globset;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate memchr;
extern crate regex;
extern crate same_file;
extern crate thread_local;
extern crate walkdir;
#[cfg(windows)]
extern crate winapi_util;
use std::error;
use std::fmt;
use std::io;
use std::path::{Path, PathBuf};
pub use walk::{
DirEntry, ParallelVisitor, ParallelVisitorBuilder, Walk, WalkBuilder,
WalkParallel, WalkState,
};
mod default_types;
mod dir;
pub mod gitignore;
pub mod overrides;
mod pathutil;
pub mod types;
mod walk;
/// Represents an error that can occur when parsing a gitignore file.
#[derive(Debug)]
pub enum Error {
/// A collection of "soft" errors. These occur when adding an ignore
/// file partially succeeded.
Partial(Vec<Error>),
/// An error associated with a specific line number.
WithLineNumber {
/// The line number.
line: u64,
/// The underlying error.
err: Box<Error>,
},
/// An error associated with a particular file path.
WithPath {
/// The file path.
path: PathBuf,
/// The underlying error.
err: Box<Error>,
},
/// An error associated with a particular directory depth when recursively
/// walking a directory.
WithDepth {
/// The directory depth.
depth: usize,
/// The underlying error.
err: Box<Error>,
},
/// An error that occurs when a file loop is detected when traversing
/// symbolic links.
Loop {
/// The ancestor file path in the loop.
ancestor: PathBuf,
/// The child file path in the loop.
child: PathBuf,
},
/// An error that occurs when doing I/O, such as reading an ignore file.
Io(io::Error),
/// An error that occurs when trying to parse a glob.
Glob {
/// The original glob that caused this error. This glob, when
/// available, always corresponds to the glob provided by an end user.
/// e.g., It is the glob as written in a `.gitignore` file.
///
/// (This glob may be distinct from the glob that is actually
/// compiled, after accounting for `gitignore` semantics.)
glob: Option<String>,
/// The underlying glob error as a string.
err: String,
},
/// A type selection for a file type that is not defined.
UnrecognizedFileType(String),
/// A user specified file type definition could not be parsed.
InvalidDefinition,
}
impl Clone for Error {
fn clone(&self) -> Error {
match *self {
Error::Partial(ref errs) => Error::Partial(errs.clone()),
Error::WithLineNumber { line, ref err } => {
Error::WithLineNumber { line: line, err: err.clone() }
}
Error::WithPath { ref path, ref err } => {
Error::WithPath { path: path.clone(), err: err.clone() }
}
Error::WithDepth { depth, ref err } => {
Error::WithDepth { depth: depth, err: err.clone() }
}
Error::Loop { ref ancestor, ref child } => Error::Loop {
ancestor: ancestor.clone(),
child: child.clone(),
},
Error::Io(ref err) => match err.raw_os_error() {
Some(e) => Error::Io(io::Error::from_raw_os_error(e)),
None => Error::Io(io::Error::new(err.kind(), err.to_string())),
},
Error::Glob { ref glob, ref err } => {
Error::Glob { glob: glob.clone(), err: err.clone() }
}
Error::UnrecognizedFileType(ref err) => {
Error::UnrecognizedFileType(err.clone())
}
Error::InvalidDefinition => Error::InvalidDefinition,
}
}
}
impl Error {
/// Returns true if this is a partial error.
///
/// A partial error occurs when only some operations failed while others
/// may have succeeded. For example, an ignore file may contain an invalid
/// glob among otherwise valid globs.
pub fn is_partial(&self) -> bool {
match *self {
Error::Partial(_) => true,
Error::WithLineNumber { ref err, .. } => err.is_partial(),
Error::WithPath { ref err, .. } => err.is_partial(),
Error::WithDepth { ref err, .. } => err.is_partial(),
_ => false,
}
}
/// Returns true if this error is exclusively an I/O error.
pub fn is_io(&self) -> bool {
match *self {
Error::Partial(ref errs) => errs.len() == 1 && errs[0].is_io(),
Error::WithLineNumber { ref err, .. } => err.is_io(),
Error::WithPath { ref err, .. } => err.is_io(),
Error::WithDepth { ref err, .. } => err.is_io(),
Error::Loop { .. } => false,
Error::Io(_) => true,
Error::Glob { .. } => false,
Error::UnrecognizedFileType(_) => false,
Error::InvalidDefinition => false,
}
}
/// Returns a depth associated with recursively walking a directory (if
/// this error was generated from a recursive directory iterator).
pub fn depth(&self) -> Option<usize> {
match *self {
Error::WithPath { ref err, .. } => err.depth(),
Error::WithDepth { depth, .. } => Some(depth),
_ => None,
}
}
/// Turn an error into a tagged error with the given file path.
fn with_path<P: AsRef<Path>>(self, path: P) -> Error {
Error::WithPath {
path: path.as_ref().to_path_buf(),
err: Box::new(self),
}
}
/// Turn an error into a tagged error with the given depth.
fn with_depth(self, depth: usize) -> Error {
Error::WithDepth { depth: depth, err: Box::new(self) }
}
/// Turn an error into a tagged error with the given file path and line
/// number. If path is empty, then it is omitted from the error.
fn tagged<P: AsRef<Path>>(self, path: P, lineno: u64) -> Error {
let errline =
Error::WithLineNumber { line: lineno, err: Box::new(self) };
if path.as_ref().as_os_str().is_empty() {
return errline;
}
errline.with_path(path)
}
/// Build an error from a walkdir error.
fn from_walkdir(err: walkdir::Error) -> Error {
let depth = err.depth();
if let (Some(anc), Some(child)) = (err.loop_ancestor(), err.path()) {
return Error::WithDepth {
depth: depth,
err: Box::new(Error::Loop {
ancestor: anc.to_path_buf(),
child: child.to_path_buf(),
}),
};
}
let path = err.path().map(|p| p.to_path_buf());
let mut ig_err = Error::Io(io::Error::from(err));
if let Some(path) = path {
ig_err = Error::WithPath { path: path, err: Box::new(ig_err) };
}
ig_err
}
}
impl error::Error for Error {
#[allow(deprecated)]
fn description(&self) -> &str {
match *self {
Error::Partial(_) => "partial error",
Error::WithLineNumber { ref err, .. } => err.description(),
Error::WithPath { ref err, .. } => err.description(),
Error::WithDepth { ref err, .. } => err.description(),
Error::Loop { .. } => "file system loop found",
Error::Io(ref err) => err.description(),
Error::Glob { ref err, .. } => err,
Error::UnrecognizedFileType(_) => "unrecognized file type",
Error::InvalidDefinition => "invalid definition",
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Partial(ref errs) => {
let msgs: Vec<String> =
errs.iter().map(|err| err.to_string()).collect();
write!(f, "{}", msgs.join("\n"))
}
Error::WithLineNumber { line, ref err } => {
write!(f, "line {}: {}", line, err)
}
Error::WithPath { ref path, ref err } => {
write!(f, "{}: {}", path.display(), err)
}
Error::WithDepth { ref err, .. } => err.fmt(f),
Error::Loop { ref ancestor, ref child } => write!(
f,
"File system loop found: \
{} points to an ancestor {}",
child.display(),
ancestor.display()
),
Error::Io(ref err) => err.fmt(f),
Error::Glob { glob: None, ref err } => write!(f, "{}", err),
Error::Glob { glob: Some(ref glob), ref err } => {
write!(f, "error parsing glob '{}': {}", glob, err)
}
Error::UnrecognizedFileType(ref ty) => {
write!(f, "unrecognized file type: {}", ty)
}
Error::InvalidDefinition => write!(
f,
"invalid definition (format is type:glob, e.g., \
html:*.html)"
),
}
}
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
Error::Io(err)
}
}
#[derive(Debug, Default)]
struct PartialErrorBuilder(Vec<Error>);
impl PartialErrorBuilder {
fn push(&mut self, err: Error) {
self.0.push(err);
}
fn push_ignore_io(&mut self, err: Error) {
if !err.is_io() {
self.push(err);
}
}
fn maybe_push(&mut self, err: Option<Error>) {
if let Some(err) = err {
self.push(err);
}
}
fn maybe_push_ignore_io(&mut self, err: Option<Error>) {
if let Some(err) = err {
self.push_ignore_io(err);
}
}
fn into_error_option(mut self) -> Option<Error> {
if self.0.is_empty() {
None
} else if self.0.len() == 1 {
Some(self.0.pop().unwrap())
} else {
Some(Error::Partial(self.0))
}
}
}
/// The result of a glob match.
///
/// The type parameter `T` typically refers to a type that provides more
/// information about a particular match. For example, it might identify
/// the specific gitignore file and the specific glob pattern that caused
/// the match.
#[derive(Clone, Debug)]
pub enum Match<T> {
/// The path didn't match any glob.
None,
/// The highest precedent glob matched indicates the path should be
/// ignored.
Ignore(T),
/// The highest precedent glob matched indicates the path should be
/// whitelisted.
Whitelist(T),
}
impl<T> Match<T> {
/// Returns true if the match result didn't match any globs.
pub fn is_none(&self) -> bool {
match *self {
Match::None => true,
Match::Ignore(_) | Match::Whitelist(_) => false,
}
}
/// Returns true if the match result implies the path should be ignored.
pub fn is_ignore(&self) -> bool {
match *self {
Match::Ignore(_) => true,
Match::None | Match::Whitelist(_) => false,
}
}
/// Returns true if the match result implies the path should be
/// whitelisted.
pub fn is_whitelist(&self) -> bool {
match *self {
Match::Whitelist(_) => true,
Match::None | Match::Ignore(_) => false,
}
}
/// Inverts the match so that `Ignore` becomes `Whitelist` and
/// `Whitelist` becomes `Ignore`. A non-match remains the same.
pub fn invert(self) -> Match<T> {
match self {
Match::None => Match::None,
Match::Ignore(t) => Match::Whitelist(t),
Match::Whitelist(t) => Match::Ignore(t),
}
}
/// Return the value inside this match if it exists.
pub fn inner(&self) -> Option<&T> {
match *self {
Match::None => None,
Match::Ignore(ref t) => Some(t),
Match::Whitelist(ref t) => Some(t),
}
}
/// Apply the given function to the value inside this match.
///
/// If the match has no value, then return the match unchanged.
pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Match<U> {
match self {
Match::None => Match::None,
Match::Ignore(t) => Match::Ignore(f(t)),
Match::Whitelist(t) => Match::Whitelist(f(t)),
}
}
/// Return the match if it is not none. Otherwise, return other.
pub fn or(self, other: Self) -> Self {
if self.is_none() {
other
} else {
self
}
}
}
#[cfg(test)]
mod tests {
use std::env;
use std::error;
use std::fs;
use std::path::{Path, PathBuf};
use std::result;
/// A convenient result type alias.
pub type Result<T> =
result::Result<T, Box<dyn error::Error + Send + Sync>>;
macro_rules! err {
($($tt:tt)*) => {
Box::<dyn error::Error + Send + Sync>::from(format!($($tt)*))
}
}
/// A simple wrapper for creating a temporary directory that is
/// automatically deleted when it's dropped.
///
/// We use this in lieu of tempfile because tempfile brings in too many
/// dependencies.
#[derive(Debug)]
pub struct TempDir(PathBuf);
impl Drop for TempDir {
fn drop(&mut self) {
fs::remove_dir_all(&self.0).unwrap();
}
}
impl TempDir {
/// Create a new empty temporary directory under the system's configured
/// temporary directory.
pub fn new() -> Result<TempDir> {
use std::sync::atomic::{AtomicUsize, Ordering};
static TRIES: usize = 100;
static COUNTER: AtomicUsize = AtomicUsize::new(0);
let tmpdir = env::temp_dir();
for _ in 0..TRIES {
let count = COUNTER.fetch_add(1, Ordering::SeqCst);
let path = tmpdir.join("rust-ignore").join(count.to_string());
if path.is_dir() {
continue;
}
fs::create_dir_all(&path).map_err(|e| {
err!("failed to create {}: {}", path.display(), e)
})?;
return Ok(TempDir(path));
}
Err(err!("failed to create temp dir after {} tries", TRIES))
}
/// Return the underlying path to this temporary directory.
pub fn path(&self) -> &Path {
&self.0
}
}
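    // Added for illustration: a quick sanity check that the `Match`
    // combinators compose as documented. `invert` swaps
    // `Ignore`/`Whitelist`, while `or` falls back only when the receiver
    // is `Match::None`.
    #[test]
    fn match_combinators() {
        use super::Match;
        let m: Match<()> = Match::Ignore(());
        assert!(m.invert().is_whitelist());
        assert!(Match::<()>::None.or(Match::Whitelist(())).is_whitelist());
        assert!(Match::Ignore(()).or(Match::Whitelist(())).is_ignore());
    }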
}

crates/ignore/src/overrides.rs Normal file

@@ -0,0 +1,262 @@
/*!
The overrides module provides a way to specify a set of override globs.
This provides functionality similar to `--include` or `--exclude` in command
line tools.
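# Example
A brief sketch (the root path here is arbitrary): a matcher built from the
single glob `*.rs` whitelists Rust files and, because at least one
whitelist glob is present, treats any non-directory path that matches
nothing as ignored.
```
use ignore::overrides::OverrideBuilder;
let ov = OverrideBuilder::new("/project/root")
    .add("*.rs").unwrap()
    .build().unwrap();
assert!(ov.matched("src/main.rs", false).is_whitelist());
assert!(ov.matched("notes.txt", false).is_ignore());
```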
*/
use std::path::Path;
use gitignore::{self, Gitignore, GitignoreBuilder};
use {Error, Match};
/// Glob represents a single glob in an override matcher.
///
/// This is used to report information about the highest precedent glob
/// that matched.
///
/// Note that not all matches necessarily correspond to a specific glob. For
/// example, if there are one or more whitelist globs and a file path doesn't
/// match any glob in the set, then the file path is considered to be ignored.
///
/// The lifetime `'a` refers to the lifetime of the matcher that produced
/// this glob.
#[derive(Clone, Debug)]
pub struct Glob<'a>(GlobInner<'a>);
#[derive(Clone, Debug)]
enum GlobInner<'a> {
/// No glob matched, but the file path should still be ignored.
UnmatchedIgnore,
/// A glob matched.
Matched(&'a gitignore::Glob),
}
impl<'a> Glob<'a> {
fn unmatched() -> Glob<'a> {
Glob(GlobInner::UnmatchedIgnore)
}
}
/// Manages a set of overrides provided explicitly by the end user.
#[derive(Clone, Debug)]
pub struct Override(Gitignore);
impl Override {
/// Returns an empty matcher that never matches any file path.
pub fn empty() -> Override {
Override(Gitignore::empty())
}
/// Returns the directory of this override set.
///
/// All matches are done relative to this path.
pub fn path(&self) -> &Path {
self.0.path()
}
/// Returns true if and only if this matcher is empty.
///
/// When a matcher is empty, it will never match any file path.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Returns the total number of ignore globs.
///
/// Note that the underlying `Gitignore` counts are intentionally swapped
/// here: override matches invert gitignore semantics (see `matched`), so
/// a gitignore whitelist glob acts as an ignore glob in an override
/// matcher, and vice versa.
pub fn num_ignores(&self) -> u64 {
    self.0.num_whitelists()
}
/// Returns the total number of whitelisted globs.
pub fn num_whitelists(&self) -> u64 {
    self.0.num_ignores()
}
/// Returns whether the given file path matched a pattern in this override
/// matcher.
///
/// `is_dir` should be true if the path refers to a directory and false
/// otherwise.
///
/// If there are no overrides, then this always returns `Match::None`.
///
/// If there is at least one whitelist override and `is_dir` is false, then
/// this never returns `Match::None`, since non-matches are interpreted as
/// ignored.
///
/// The given path is matched to the globs relative to the path given
/// when building the override matcher. Specifically, before matching
/// `path`, its prefix (as determined by a common suffix of the directory
/// given) is stripped. If there is no common suffix/prefix overlap, then
/// `path` is assumed to reside in the same directory as the root path for
/// this set of overrides.
pub fn matched<'a, P: AsRef<Path>>(
&'a self,
path: P,
is_dir: bool,
) -> Match<Glob<'a>> {
if self.is_empty() {
return Match::None;
}
let mat = self.0.matched(path, is_dir).invert();
if mat.is_none() && self.num_whitelists() > 0 && !is_dir {
return Match::Ignore(Glob::unmatched());
}
mat.map(move |giglob| Glob(GlobInner::Matched(giglob)))
}
}
/// Builds a matcher for a set of glob overrides.
pub struct OverrideBuilder {
builder: GitignoreBuilder,
}
impl OverrideBuilder {
/// Create a new override builder.
///
/// Matching is done relative to the directory path provided.
pub fn new<P: AsRef<Path>>(path: P) -> OverrideBuilder {
OverrideBuilder { builder: GitignoreBuilder::new(path) }
}
/// Builds a new override matcher from the globs added so far.
///
/// Once a matcher is built, no new globs can be added to it.
pub fn build(&self) -> Result<Override, Error> {
Ok(Override(self.builder.build()?))
}
/// Add a glob to the set of overrides.
///
/// Globs provided here have precisely the same semantics as a single
/// line in a `gitignore` file, where the meaning of `!` is inverted:
/// namely, `!` at the beginning of a glob will ignore a file. Without `!`,
/// all matches of the glob provided are treated as whitelist matches.
pub fn add(&mut self, glob: &str) -> Result<&mut OverrideBuilder, Error> {
self.builder.add_line(None, glob)?;
Ok(self)
}
/// Toggle whether the globs should be matched case insensitively or not.
///
/// When this option is changed, only globs added after the change will be affected.
///
/// This is disabled by default.
pub fn case_insensitive(
&mut self,
yes: bool,
) -> Result<&mut OverrideBuilder, Error> {
// TODO: This should not return a `Result`. Fix this in the next semver
// release.
self.builder.case_insensitive(yes)?;
Ok(self)
}
}
#[cfg(test)]
mod tests {
use super::{Override, OverrideBuilder};
const ROOT: &'static str = "/home/andrew/foo";
fn ov(globs: &[&str]) -> Override {
let mut builder = OverrideBuilder::new(ROOT);
for glob in globs {
builder.add(glob).unwrap();
}
builder.build().unwrap()
}
#[test]
fn empty() {
let ov = ov(&[]);
assert!(ov.matched("a.foo", false).is_none());
assert!(ov.matched("a", false).is_none());
assert!(ov.matched("", false).is_none());
}
#[test]
fn simple() {
let ov = ov(&["*.foo", "!*.bar"]);
assert!(ov.matched("a.foo", false).is_whitelist());
assert!(ov.matched("a.foo", true).is_whitelist());
assert!(ov.matched("a.rs", false).is_ignore());
assert!(ov.matched("a.rs", true).is_none());
assert!(ov.matched("a.bar", false).is_ignore());
assert!(ov.matched("a.bar", true).is_ignore());
}
#[test]
fn only_ignores() {
let ov = ov(&["!*.bar"]);
assert!(ov.matched("a.rs", false).is_none());
assert!(ov.matched("a.rs", true).is_none());
assert!(ov.matched("a.bar", false).is_ignore());
assert!(ov.matched("a.bar", true).is_ignore());
}
#[test]
fn precedence() {
let ov = ov(&["*.foo", "!*.bar.foo"]);
assert!(ov.matched("a.foo", false).is_whitelist());
assert!(ov.matched("a.baz", false).is_ignore());
assert!(ov.matched("a.bar.foo", false).is_ignore());
}
#[test]
fn gitignore() {
let ov = ov(&["/foo", "bar/*.rs", "baz/**"]);
assert!(ov.matched("bar/lib.rs", false).is_whitelist());
assert!(ov.matched("bar/wat/lib.rs", false).is_ignore());
assert!(ov.matched("wat/bar/lib.rs", false).is_ignore());
assert!(ov.matched("foo", false).is_whitelist());
assert!(ov.matched("wat/foo", false).is_ignore());
assert!(ov.matched("baz", false).is_ignore());
assert!(ov.matched("baz/a", false).is_whitelist());
assert!(ov.matched("baz/a/b", false).is_whitelist());
}
#[test]
fn allow_directories() {
// This tests that directories are NOT ignored when they are unmatched.
let ov = ov(&["*.rs"]);
assert!(ov.matched("foo.rs", false).is_whitelist());
assert!(ov.matched("foo.c", false).is_ignore());
assert!(ov.matched("foo", false).is_ignore());
assert!(ov.matched("foo", true).is_none());
assert!(ov.matched("src/foo.rs", false).is_whitelist());
assert!(ov.matched("src/foo.c", false).is_ignore());
assert!(ov.matched("src/foo", false).is_ignore());
assert!(ov.matched("src/foo", true).is_none());
}
#[test]
fn absolute_path() {
let ov = ov(&["!/bar"]);
assert!(ov.matched("./foo/bar", false).is_none());
}
#[test]
fn case_insensitive() {
let ov = OverrideBuilder::new(ROOT)
.case_insensitive(true)
.unwrap()
.add("*.html")
.unwrap()
.build()
.unwrap();
assert!(ov.matched("foo.html", false).is_whitelist());
assert!(ov.matched("foo.HTML", false).is_whitelist());
assert!(ov.matched("foo.htm", false).is_ignore());
assert!(ov.matched("foo.HTM", false).is_ignore());
}
#[test]
fn default_case_sensitive() {
let ov =
OverrideBuilder::new(ROOT).add("*.html").unwrap().build().unwrap();
assert!(ov.matched("foo.html", false).is_whitelist());
assert!(ov.matched("foo.HTML", false).is_ignore());
assert!(ov.matched("foo.htm", false).is_ignore());
assert!(ov.matched("foo.HTM", false).is_ignore());
}
}

crates/ignore/src/pathutil.rs Normal file

@@ -0,0 +1,142 @@
use std::ffi::OsStr;
use std::path::Path;
use walk::DirEntry;
/// Returns true if and only if this entry is considered to be hidden.
///
/// This only returns true if the base name of the path starts with a `.`.
///
/// On Unix, this implements a more optimized check.
#[cfg(unix)]
pub fn is_hidden(dent: &DirEntry) -> bool {
use std::os::unix::ffi::OsStrExt;
if let Some(name) = file_name(dent.path()) {
name.as_bytes().get(0) == Some(&b'.')
} else {
false
}
}
/// Returns true if and only if this entry is considered to be hidden.
///
/// On Windows, this returns true if one of the following is true:
///
/// * The base name of the path starts with a `.`.
/// * The file attributes have the `HIDDEN` property set.
#[cfg(windows)]
pub fn is_hidden(dent: &DirEntry) -> bool {
use std::os::windows::fs::MetadataExt;
use winapi_util::file;
// This looks like we're doing an extra stat call, but on Windows, the
// directory traverser reuses the metadata retrieved from each directory
// entry and stores it on the DirEntry itself. So this is "free."
if let Ok(md) = dent.metadata() {
if file::is_hidden(md.file_attributes() as u64) {
return true;
}
}
if let Some(name) = file_name(dent.path()) {
name.to_str().map(|s| s.starts_with(".")).unwrap_or(false)
} else {
false
}
}
/// Returns true if and only if this entry is considered to be hidden.
///
/// This only returns true if the base name of the path starts with a `.`.
#[cfg(not(any(unix, windows)))]
pub fn is_hidden(dent: &DirEntry) -> bool {
if let Some(name) = file_name(dent.path()) {
name.to_str().map(|s| s.starts_with(".")).unwrap_or(false)
} else {
false
}
}
/// Strip `prefix` from the `path` and return the remainder.
///
/// If `path` doesn't have a prefix `prefix`, then return `None`.
#[cfg(unix)]
pub fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
prefix: &'a P,
path: &'a Path,
) -> Option<&'a Path> {
use std::os::unix::ffi::OsStrExt;
let prefix = prefix.as_ref().as_os_str().as_bytes();
let path = path.as_os_str().as_bytes();
if prefix.len() > path.len() || prefix != &path[0..prefix.len()] {
None
} else {
Some(&Path::new(OsStr::from_bytes(&path[prefix.len()..])))
}
}
/// Strip `prefix` from the `path` and return the remainder.
///
/// If `path` doesn't have a prefix `prefix`, then return `None`.
#[cfg(not(unix))]
pub fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
prefix: &'a P,
path: &'a Path,
) -> Option<&'a Path> {
path.strip_prefix(prefix).ok()
}
/// Returns true if this file path is just a file name, i.e., its parent is
/// the empty string.
#[cfg(unix)]
pub fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
use memchr::memchr;
use std::os::unix::ffi::OsStrExt;
let path = path.as_ref().as_os_str().as_bytes();
memchr(b'/', path).is_none()
}
/// Returns true if this file path is just a file name, i.e., its parent is
/// the empty string.
#[cfg(not(unix))]
pub fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
path.as_ref().parent().map(|p| p.as_os_str().is_empty()).unwrap_or(false)
}
/// The final component of the path, if it is a normal file.
///
/// If the path terminates in ., .., or consists solely of a root or prefix,
/// file_name will return None.
#[cfg(unix)]
pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
path: &'a P,
) -> Option<&'a OsStr> {
use memchr::memrchr;
use std::os::unix::ffi::OsStrExt;
let path = path.as_ref().as_os_str().as_bytes();
if path.is_empty() {
return None;
} else if path.len() == 1 && path[0] == b'.' {
return None;
} else if path.last() == Some(&b'.') {
return None;
} else if path.len() >= 2 && &path[path.len() - 2..] == &b".."[..] {
return None;
}
let last_slash = memrchr(b'/', path).map(|i| i + 1).unwrap_or(0);
Some(OsStr::from_bytes(&path[last_slash..]))
}
/// The final component of the path, if it is a normal file.
///
/// If the path terminates in ., .., or consists solely of a root or prefix,
/// file_name will return None.
#[cfg(not(unix))]
pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
path: &'a P,
) -> Option<&'a OsStr> {
path.as_ref().file_name()
}
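#[cfg(test)]
mod tests {
    // Added for illustration: spot checks that the platform-specific fast
    // paths above agree with the documented behavior of `std::path::Path`.
    use std::path::Path;
    #[test]
    fn file_name_basic() {
        let name = super::file_name(Path::new("foo/bar.rs"));
        assert_eq!(name.and_then(|n| n.to_str()), Some("bar.rs"));
        assert_eq!(super::file_name(Path::new("foo/..")), None);
        assert_eq!(super::file_name(Path::new(".")), None);
    }
    #[test]
    fn is_file_name_basic() {
        assert!(super::is_file_name("bar.rs"));
        assert!(!super::is_file_name("foo/bar.rs"));
    }
}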

crates/ignore/src/types.rs Normal file

@@ -0,0 +1,591 @@
/*!
The types module provides a way of associating globs on file names to file
types.
This can be used to match specific types of files. For example, among
the default file types provided, the Rust file type is defined to be `*.rs`
with name `rust`. Similarly, the C file type is defined to be `*.{c,h}` with
name `c`.
Note that the set of default types may change over time.
# Example
This shows how to create and use a simple file type matcher using the default
file types defined in this crate.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.select("rust");
let matcher = builder.build().unwrap();
assert!(matcher.matched("foo.rs", false).is_whitelist());
assert!(matcher.matched("foo.c", false).is_ignore());
```
# Example: negation
This is like the previous example, but shows how negating a file type works.
That is, this will let us match file paths that *don't* correspond to a
particular file type.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.negate("c");
let matcher = builder.build().unwrap();
assert!(matcher.matched("foo.rs", false).is_none());
assert!(matcher.matched("foo.c", false).is_ignore());
```
# Example: custom file type definitions
This shows how to extend this library's default file type definitions with
your own.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.add("foo", "*.foo");
// Another way of adding a file type definition.
// This is useful when accepting input from an end user.
builder.add_def("bar:*.bar");
// Note: we only select `foo`, not `bar`.
builder.select("foo");
let matcher = builder.build().unwrap();
assert!(matcher.matched("x.foo", false).is_whitelist());
// This is ignored because we only selected the `foo` file type.
assert!(matcher.matched("x.bar", false).is_ignore());
```
We can also add file type definitions based on other definitions.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.add("foo", "*.foo");
builder.add_def("bar:include:foo,cpp");
builder.select("bar");
let matcher = builder.build().unwrap();
assert!(matcher.matched("x.foo", false).is_whitelist());
assert!(matcher.matched("y.cpp", false).is_whitelist());
```
*/
use std::cell::RefCell;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Arc;
use globset::{GlobBuilder, GlobSet, GlobSetBuilder};
use regex::Regex;
use thread_local::ThreadLocal;
use default_types::DEFAULT_TYPES;
use pathutil::file_name;
use {Error, Match};
/// Glob represents a single glob in a set of file type definitions.
///
/// There may be more than one glob for a particular file type.
///
/// This is used to report information about the highest precedent glob
/// that matched.
///
/// Note that not all matches necessarily correspond to a specific glob.
/// For example, if there are one or more selections and a file path doesn't
/// match any of those selections, then the file path is considered to be
/// ignored.
///
/// The lifetime `'a` refers to the lifetime of the underlying file type
/// definition, which corresponds to the lifetime of the file type matcher.
#[derive(Clone, Debug)]
pub struct Glob<'a>(GlobInner<'a>);
#[derive(Clone, Debug)]
enum GlobInner<'a> {
/// No glob matched, but the file path should still be ignored.
UnmatchedIgnore,
/// A glob matched.
Matched {
/// The file type definition which provided the glob.
def: &'a FileTypeDef,
/// The index of the glob that matched inside the file type definition.
which: usize,
/// Whether the selection was negated or not.
negated: bool,
},
}
impl<'a> Glob<'a> {
fn unmatched() -> Glob<'a> {
Glob(GlobInner::UnmatchedIgnore)
}
/// Return the file type definition that matched, if one exists. A file type
/// definition always exists when a specific definition matches a file
/// path.
pub fn file_type_def(&self) -> Option<&FileTypeDef> {
match self {
Glob(GlobInner::UnmatchedIgnore) => None,
Glob(GlobInner::Matched { def, .. }) => Some(def),
}
}
}
/// A single file type definition.
///
/// File type definitions can be retrieved in aggregate from a file type
/// matcher. A file type definition is also reported when it is
/// responsible for a match.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FileTypeDef {
name: String,
globs: Vec<String>,
}
impl FileTypeDef {
/// Return the name of this file type.
pub fn name(&self) -> &str {
&self.name
}
/// Return the globs used to recognize this file type.
pub fn globs(&self) -> &[String] {
&self.globs
}
}
/// Types is a file type matcher.
#[derive(Clone, Debug)]
pub struct Types {
/// All of the file type definitions, sorted lexicographically by name.
defs: Vec<FileTypeDef>,
/// All of the selections made by the user.
selections: Vec<Selection<FileTypeDef>>,
/// Whether there is at least one Selection::Select in our selections.
/// When this is true, a Match::None is converted to Match::Ignore.
has_selected: bool,
/// A mapping from glob index in the set to two indices. The first is an
/// index into `selections` and the second is an index into the
/// corresponding file type definition's list of globs.
glob_to_selection: Vec<(usize, usize)>,
/// The set of all glob selections, used for actual matching.
set: GlobSet,
/// Temporary storage for globs that match.
matches: Arc<ThreadLocal<RefCell<Vec<usize>>>>,
}
/// Indicates the type of a selection for a particular file type.
#[derive(Clone, Debug)]
enum Selection<T> {
Select(String, T),
Negate(String, T),
}
impl<T> Selection<T> {
fn is_negated(&self) -> bool {
match *self {
Selection::Select(..) => false,
Selection::Negate(..) => true,
}
}
fn name(&self) -> &str {
match *self {
Selection::Select(ref name, _) => name,
Selection::Negate(ref name, _) => name,
}
}
fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Selection<U> {
match self {
Selection::Select(name, inner) => {
Selection::Select(name, f(inner))
}
Selection::Negate(name, inner) => {
Selection::Negate(name, f(inner))
}
}
}
fn inner(&self) -> &T {
match *self {
Selection::Select(_, ref inner) => inner,
Selection::Negate(_, ref inner) => inner,
}
}
}
impl Types {
/// Creates a new file type matcher that never matches any path and
/// contains no file type definitions.
pub fn empty() -> Types {
Types {
defs: vec![],
selections: vec![],
has_selected: false,
glob_to_selection: vec![],
set: GlobSetBuilder::new().build().unwrap(),
matches: Arc::new(ThreadLocal::default()),
}
}
/// Returns true if and only if this matcher has zero selections.
pub fn is_empty(&self) -> bool {
self.selections.is_empty()
}
/// Returns the number of selections used in this matcher.
pub fn len(&self) -> usize {
self.selections.len()
}
/// Return the set of current file type definitions.
///
/// Definitions and globs are sorted.
pub fn definitions(&self) -> &[FileTypeDef] {
&self.defs
}
/// Returns a match for the given path against this file type matcher.
///
/// The path is considered whitelisted if it matches a selected file type.
/// The path is considered ignored if it matches a negated file type.
/// If at least one file type is selected and `path` doesn't match, then
/// the path is also considered ignored.
pub fn matched<'a, P: AsRef<Path>>(
&'a self,
path: P,
is_dir: bool,
) -> Match<Glob<'a>> {
// File types don't apply to directories, and we can't do anything
// if our glob set is empty.
if is_dir || self.set.is_empty() {
return Match::None;
}
// We only want to match against the file name, so extract it.
// If one doesn't exist, then we can't match it.
let name = match file_name(path.as_ref()) {
Some(name) => name,
None if self.has_selected => {
return Match::Ignore(Glob::unmatched());
}
None => {
return Match::None;
}
};
let mut matches = self.matches.get_or_default().borrow_mut();
self.set.matches_into(name, &mut *matches);
// The highest precedent match is the last one.
if let Some(&i) = matches.last() {
let (isel, iglob) = self.glob_to_selection[i];
let sel = &self.selections[isel];
let glob = Glob(GlobInner::Matched {
def: sel.inner(),
which: iglob,
negated: sel.is_negated(),
});
return if sel.is_negated() {
Match::Ignore(glob)
} else {
Match::Whitelist(glob)
};
}
if self.has_selected {
Match::Ignore(Glob::unmatched())
} else {
Match::None
}
}
}
/// TypesBuilder builds a type matcher from a set of file type definitions and
/// a set of file type selections.
pub struct TypesBuilder {
types: HashMap<String, FileTypeDef>,
selections: Vec<Selection<()>>,
}
impl TypesBuilder {
/// Create a new builder for a file type matcher.
///
/// The builder contains *no* type definitions to start with. A set
/// of default type definitions can be added with `add_defaults`, and
/// additional type definitions can be added with `add` and `add_def`.
/// File types can then be selected or negated with `select` and `negate`.
pub fn new() -> TypesBuilder {
TypesBuilder { types: HashMap::new(), selections: vec![] }
}
/// Build the current set of file type definitions *and* selections into
/// a file type matcher.
pub fn build(&self) -> Result<Types, Error> {
let defs = self.definitions();
let has_selected = self.selections.iter().any(|s| !s.is_negated());
let mut selections = vec![];
let mut glob_to_selection = vec![];
let mut build_set = GlobSetBuilder::new();
for (isel, selection) in self.selections.iter().enumerate() {
let def = match self.types.get(selection.name()) {
Some(def) => def.clone(),
None => {
let name = selection.name().to_string();
return Err(Error::UnrecognizedFileType(name));
}
};
for (iglob, glob) in def.globs.iter().enumerate() {
build_set.add(
GlobBuilder::new(glob)
.literal_separator(true)
.build()
.map_err(|err| Error::Glob {
glob: Some(glob.to_string()),
err: err.kind().to_string(),
})?,
);
glob_to_selection.push((isel, iglob));
}
selections.push(selection.clone().map(move |_| def));
}
let set = build_set
.build()
.map_err(|err| Error::Glob { glob: None, err: err.to_string() })?;
Ok(Types {
defs: defs,
selections: selections,
has_selected: has_selected,
glob_to_selection: glob_to_selection,
set: set,
matches: Arc::new(ThreadLocal::default()),
})
}
/// Return the set of current file type definitions.
///
/// Definitions and globs are sorted.
pub fn definitions(&self) -> Vec<FileTypeDef> {
let mut defs = vec![];
for def in self.types.values() {
let mut def = def.clone();
def.globs.sort();
defs.push(def);
}
defs.sort_by(|def1, def2| def1.name().cmp(def2.name()));
defs
}
/// Select the file type given by `name`.
///
/// If `name` is `all`, then all file types currently defined are selected.
pub fn select(&mut self, name: &str) -> &mut TypesBuilder {
if name == "all" {
for name in self.types.keys() {
self.selections.push(Selection::Select(name.to_string(), ()));
}
} else {
self.selections.push(Selection::Select(name.to_string(), ()));
}
self
}
/// Ignore the file type given by `name`.
///
/// If `name` is `all`, then all file types currently defined are negated.
pub fn negate(&mut self, name: &str) -> &mut TypesBuilder {
if name == "all" {
for name in self.types.keys() {
self.selections.push(Selection::Negate(name.to_string(), ()));
}
} else {
self.selections.push(Selection::Negate(name.to_string(), ()));
}
self
}
/// Clear any file type definitions for the type name given.
pub fn clear(&mut self, name: &str) -> &mut TypesBuilder {
self.types.remove(name);
self
}
/// Add a new file type definition. `name` can be arbitrary and `pat`
/// should be a glob recognizing file paths belonging to the `name` type.
///
/// If `name` is `all` or otherwise contains any character that is not a
/// Unicode letter or number, then an error is returned.
pub fn add(&mut self, name: &str, glob: &str) -> Result<(), Error> {
lazy_static! {
static ref RE: Regex = Regex::new(r"^[\pL\pN]+$").unwrap();
};
if name == "all" || !RE.is_match(name) {
return Err(Error::InvalidDefinition);
}
let (key, glob) = (name.to_string(), glob.to_string());
self.types
.entry(key)
.or_insert_with(|| FileTypeDef {
name: name.to_string(),
globs: vec![],
})
.globs
.push(glob);
Ok(())
}
/// Add a new file type definition specified in string form. There are two
/// valid formats:
/// 1. `{name}:{glob}`. This defines a 'root' definition that associates the
/// given name with the given glob.
/// 2. `{name}:include:{comma-separated list of already defined names}`.
/// This defines an 'include' definition that associates the given name
/// with the definitions of the given existing types.
/// Names may not include any characters that are not
/// Unicode letters or numbers.
pub fn add_def(&mut self, def: &str) -> Result<(), Error> {
let parts: Vec<&str> = def.split(':').collect();
match parts.len() {
2 => {
let name = parts[0];
let glob = parts[1];
if name.is_empty() || glob.is_empty() {
return Err(Error::InvalidDefinition);
}
self.add(name, glob)
}
3 => {
let name = parts[0];
let types_string = parts[2];
if name.is_empty()
|| parts[1] != "include"
|| types_string.is_empty()
{
return Err(Error::InvalidDefinition);
}
let types = types_string.split(',');
// Check ahead of time to ensure that all types specified are
// present and fail fast if not.
if types.clone().any(|t| !self.types.contains_key(t)) {
return Err(Error::InvalidDefinition);
}
for type_name in types {
let globs =
self.types.get(type_name).unwrap().globs.clone();
for glob in globs {
self.add(name, &glob)?;
}
}
Ok(())
}
_ => Err(Error::InvalidDefinition),
}
}
/// Add a set of default file type definitions.
pub fn add_defaults(&mut self) -> &mut TypesBuilder {
static MSG: &'static str = "adding a default type should never fail";
for &(name, exts) in DEFAULT_TYPES {
for ext in exts {
self.add(name, ext).expect(MSG);
}
}
self
}
}
#[cfg(test)]
mod tests {
use super::TypesBuilder;
macro_rules! matched {
($name:ident, $types:expr, $sel:expr, $selnot:expr,
$path:expr) => {
matched!($name, $types, $sel, $selnot, $path, true);
};
(not, $name:ident, $types:expr, $sel:expr, $selnot:expr,
$path:expr) => {
matched!($name, $types, $sel, $selnot, $path, false);
};
($name:ident, $types:expr, $sel:expr, $selnot:expr,
$path:expr, $matched:expr) => {
#[test]
fn $name() {
let mut btypes = TypesBuilder::new();
for tydef in $types {
btypes.add_def(tydef).unwrap();
}
for sel in $sel {
btypes.select(sel);
}
for selnot in $selnot {
btypes.negate(selnot);
}
let types = btypes.build().unwrap();
let mat = types.matched($path, false);
assert_eq!($matched, !mat.is_ignore());
}
};
}
fn types() -> Vec<&'static str> {
vec![
"html:*.html",
"html:*.htm",
"rust:*.rs",
"js:*.js",
"foo:*.{rs,foo}",
"combo:include:html,rust",
]
}
matched!(match1, types(), vec!["rust"], vec![], "lib.rs");
matched!(match2, types(), vec!["html"], vec![], "index.html");
matched!(match3, types(), vec!["html"], vec![], "index.htm");
matched!(match4, types(), vec!["html", "rust"], vec![], "main.rs");
matched!(match5, types(), vec![], vec![], "index.html");
matched!(match6, types(), vec![], vec!["rust"], "index.html");
matched!(match7, types(), vec!["foo"], vec!["rust"], "main.foo");
matched!(match8, types(), vec!["combo"], vec![], "index.html");
matched!(match9, types(), vec!["combo"], vec![], "lib.rs");
matched!(not, matchnot1, types(), vec!["rust"], vec![], "index.html");
matched!(not, matchnot2, types(), vec![], vec!["rust"], "main.rs");
matched!(not, matchnot3, types(), vec!["foo"], vec!["rust"], "main.rs");
matched!(not, matchnot4, types(), vec!["rust"], vec!["foo"], "main.rs");
matched!(not, matchnot5, types(), vec!["rust"], vec!["foo"], "main.foo");
matched!(not, matchnot6, types(), vec!["combo"], vec![], "leftpad.js");
#[test]
fn test_invalid_defs() {
let mut btypes = TypesBuilder::new();
for tydef in types() {
btypes.add_def(tydef).unwrap();
}
// Preserve the original definitions for later comparison.
let original_defs = btypes.definitions();
let bad_defs = vec![
// Reference to type that does not exist
"combo:include:html,python",
// Bad format
"combo:foobar:html,rust",
"",
];
for def in bad_defs {
assert!(btypes.add_def(def).is_err());
// Ensure that nothing changed, even if some of the includes were valid.
assert_eq!(btypes.definitions(), original_defs);
}
}
}

crates/ignore/src/walk.rs Normal file

File diff suppressed because it is too large

crates/ignore/tests/gitignore_matched_path_or_any_parents_tests.gitignore Normal file

@@ -0,0 +1,216 @@
# Based on https://github.com/behnam/gitignore-test/blob/master/.gitignore
### file in root
# MATCH /file_root_1
file_root_00
# NO_MATCH
file_root_01/
# NO_MATCH
file_root_02/*
# NO_MATCH
file_root_03/**
# MATCH /file_root_10
/file_root_10
# NO_MATCH
/file_root_11/
# NO_MATCH
/file_root_12/*
# NO_MATCH
/file_root_13/**
# NO_MATCH
*/file_root_20
# NO_MATCH
*/file_root_21/
# NO_MATCH
*/file_root_22/*
# NO_MATCH
*/file_root_23/**
# MATCH /file_root_30
**/file_root_30
# NO_MATCH
**/file_root_31/
# NO_MATCH
**/file_root_32/*
# NO_MATCH
**/file_root_33/**
### file in sub-dir
# MATCH /parent_dir/file_deep_1
file_deep_00
# NO_MATCH
file_deep_01/
# NO_MATCH
file_deep_02/*
# NO_MATCH
file_deep_03/**
# NO_MATCH
/file_deep_10
# NO_MATCH
/file_deep_11/
# NO_MATCH
/file_deep_12/*
# NO_MATCH
/file_deep_13/**
# MATCH /parent_dir/file_deep_20
*/file_deep_20
# NO_MATCH
*/file_deep_21/
# NO_MATCH
*/file_deep_22/*
# NO_MATCH
*/file_deep_23/**
# MATCH /parent_dir/file_deep_30
**/file_deep_30
# NO_MATCH
**/file_deep_31/
# NO_MATCH
**/file_deep_32/*
# NO_MATCH
**/file_deep_33/**
### dir in root
# MATCH /dir_root_00
dir_root_00
# MATCH /dir_root_01
dir_root_01/
# MATCH /dir_root_02
dir_root_02/*
# MATCH /dir_root_03
dir_root_03/**
# MATCH /dir_root_10
/dir_root_10
# MATCH /dir_root_11
/dir_root_11/
# MATCH /dir_root_12
/dir_root_12/*
# MATCH /dir_root_13
/dir_root_13/**
# NO_MATCH
*/dir_root_20
# NO_MATCH
*/dir_root_21/
# NO_MATCH
*/dir_root_22/*
# NO_MATCH
*/dir_root_23/**
# MATCH /dir_root_30
**/dir_root_30
# MATCH /dir_root_31
**/dir_root_31/
# MATCH /dir_root_32
**/dir_root_32/*
# MATCH /dir_root_33
**/dir_root_33/**
### dir in sub-dir
# MATCH /parent_dir/dir_deep_00
dir_deep_00
# MATCH /parent_dir/dir_deep_01
dir_deep_01/
# NO_MATCH
dir_deep_02/*
# NO_MATCH
dir_deep_03/**
# NO_MATCH
/dir_deep_10
# NO_MATCH
/dir_deep_11/
# NO_MATCH
/dir_deep_12/*
# NO_MATCH
/dir_deep_13/**
# MATCH /parent_dir/dir_deep_20
*/dir_deep_20
# MATCH /parent_dir/dir_deep_21
*/dir_deep_21/
# MATCH /parent_dir/dir_deep_22
*/dir_deep_22/*
# MATCH /parent_dir/dir_deep_23
*/dir_deep_23/**
# MATCH /parent_dir/dir_deep_30
**/dir_deep_30
# MATCH /parent_dir/dir_deep_31
**/dir_deep_31/
# MATCH /parent_dir/dir_deep_32
**/dir_deep_32/*
# MATCH /parent_dir/dir_deep_33
**/dir_deep_33/**

crates/ignore/tests/gitignore_matched_path_or_any_parents_tests.rs Normal file

@@ -0,0 +1,300 @@
extern crate ignore;
use std::path::Path;
use ignore::gitignore::{Gitignore, GitignoreBuilder};
const IGNORE_FILE: &'static str =
"tests/gitignore_matched_path_or_any_parents_tests.gitignore";
fn get_gitignore() -> Gitignore {
let mut builder = GitignoreBuilder::new("ROOT");
let error = builder.add(IGNORE_FILE);
assert!(error.is_none(), "failed to open gitignore file");
builder.build().unwrap()
}
#[test]
#[should_panic(expected = "path is expected to be under the root")]
fn test_path_should_be_under_root() {
let gitignore = get_gitignore();
let path = "/tmp/some_file";
gitignore.matched_path_or_any_parents(Path::new(path), false);
    // Unreachable: the call above panics for paths outside the root.
    assert!(false);
}
#[test]
fn test_files_in_root() {
let gitignore = get_gitignore();
let m = |path: &str| {
gitignore.matched_path_or_any_parents(Path::new(path), false)
};
// 0x
assert!(m("ROOT/file_root_00").is_ignore());
assert!(m("ROOT/file_root_01").is_none());
assert!(m("ROOT/file_root_02").is_none());
assert!(m("ROOT/file_root_03").is_none());
// 1x
assert!(m("ROOT/file_root_10").is_ignore());
assert!(m("ROOT/file_root_11").is_none());
assert!(m("ROOT/file_root_12").is_none());
assert!(m("ROOT/file_root_13").is_none());
// 2x
assert!(m("ROOT/file_root_20").is_none());
assert!(m("ROOT/file_root_21").is_none());
assert!(m("ROOT/file_root_22").is_none());
assert!(m("ROOT/file_root_23").is_none());
// 3x
assert!(m("ROOT/file_root_30").is_ignore());
assert!(m("ROOT/file_root_31").is_none());
assert!(m("ROOT/file_root_32").is_none());
assert!(m("ROOT/file_root_33").is_none());
}
#[test]
fn test_files_in_deep() {
let gitignore = get_gitignore();
let m = |path: &str| {
gitignore.matched_path_or_any_parents(Path::new(path), false)
};
// 0x
assert!(m("ROOT/parent_dir/file_deep_00").is_ignore());
assert!(m("ROOT/parent_dir/file_deep_01").is_none());
assert!(m("ROOT/parent_dir/file_deep_02").is_none());
assert!(m("ROOT/parent_dir/file_deep_03").is_none());
// 1x
assert!(m("ROOT/parent_dir/file_deep_10").is_none());
assert!(m("ROOT/parent_dir/file_deep_11").is_none());
assert!(m("ROOT/parent_dir/file_deep_12").is_none());
assert!(m("ROOT/parent_dir/file_deep_13").is_none());
// 2x
assert!(m("ROOT/parent_dir/file_deep_20").is_ignore());
assert!(m("ROOT/parent_dir/file_deep_21").is_none());
assert!(m("ROOT/parent_dir/file_deep_22").is_none());
assert!(m("ROOT/parent_dir/file_deep_23").is_none());
// 3x
assert!(m("ROOT/parent_dir/file_deep_30").is_ignore());
assert!(m("ROOT/parent_dir/file_deep_31").is_none());
assert!(m("ROOT/parent_dir/file_deep_32").is_none());
assert!(m("ROOT/parent_dir/file_deep_33").is_none());
}
#[test]
fn test_dirs_in_root() {
let gitignore = get_gitignore();
let m = |path: &str, is_dir: bool| {
gitignore.matched_path_or_any_parents(Path::new(path), is_dir)
};
// 00
assert!(m("ROOT/dir_root_00", true).is_ignore());
assert!(m("ROOT/dir_root_00/file", false).is_ignore());
assert!(m("ROOT/dir_root_00/child_dir", true).is_ignore());
assert!(m("ROOT/dir_root_00/child_dir/file", false).is_ignore());
// 01
assert!(m("ROOT/dir_root_01", true).is_ignore());
assert!(m("ROOT/dir_root_01/file", false).is_ignore());
assert!(m("ROOT/dir_root_01/child_dir", true).is_ignore());
assert!(m("ROOT/dir_root_01/child_dir/file", false).is_ignore());
// 02
assert!(m("ROOT/dir_root_02", true).is_none()); // dir itself doesn't match
assert!(m("ROOT/dir_root_02/file", false).is_ignore());
assert!(m("ROOT/dir_root_02/child_dir", true).is_ignore());
assert!(m("ROOT/dir_root_02/child_dir/file", false).is_ignore());
// 03
assert!(m("ROOT/dir_root_03", true).is_none()); // dir itself doesn't match
assert!(m("ROOT/dir_root_03/file", false).is_ignore());
assert!(m("ROOT/dir_root_03/child_dir", true).is_ignore());
assert!(m("ROOT/dir_root_03/child_dir/file", false).is_ignore());
// 10
assert!(m("ROOT/dir_root_10", true).is_ignore());
assert!(m("ROOT/dir_root_10/file", false).is_ignore());
assert!(m("ROOT/dir_root_10/child_dir", true).is_ignore());
assert!(m("ROOT/dir_root_10/child_dir/file", false).is_ignore());
// 11
assert!(m("ROOT/dir_root_11", true).is_ignore());
assert!(m("ROOT/dir_root_11/file", false).is_ignore());
assert!(m("ROOT/dir_root_11/child_dir", true).is_ignore());
assert!(m("ROOT/dir_root_11/child_dir/file", false).is_ignore());
// 12
assert!(m("ROOT/dir_root_12", true).is_none()); // dir itself doesn't match
assert!(m("ROOT/dir_root_12/file", false).is_ignore());
assert!(m("ROOT/dir_root_12/child_dir", true).is_ignore());
assert!(m("ROOT/dir_root_12/child_dir/file", false).is_ignore());
// 13
assert!(m("ROOT/dir_root_13", true).is_none());
assert!(m("ROOT/dir_root_13/file", false).is_ignore());
assert!(m("ROOT/dir_root_13/child_dir", true).is_ignore());
assert!(m("ROOT/dir_root_13/child_dir/file", false).is_ignore());
// 20
assert!(m("ROOT/dir_root_20", true).is_none());
assert!(m("ROOT/dir_root_20/file", false).is_none());
assert!(m("ROOT/dir_root_20/child_dir", true).is_none());
assert!(m("ROOT/dir_root_20/child_dir/file", false).is_none());
// 21
assert!(m("ROOT/dir_root_21", true).is_none());
assert!(m("ROOT/dir_root_21/file", false).is_none());
assert!(m("ROOT/dir_root_21/child_dir", true).is_none());
assert!(m("ROOT/dir_root_21/child_dir/file", false).is_none());
// 22
assert!(m("ROOT/dir_root_22", true).is_none());
assert!(m("ROOT/dir_root_22/file", false).is_none());
assert!(m("ROOT/dir_root_22/child_dir", true).is_none());
assert!(m("ROOT/dir_root_22/child_dir/file", false).is_none());
// 23
assert!(m("ROOT/dir_root_23", true).is_none());
assert!(m("ROOT/dir_root_23/file", false).is_none());
assert!(m("ROOT/dir_root_23/child_dir", true).is_none());
assert!(m("ROOT/dir_root_23/child_dir/file", false).is_none());
// 30
assert!(m("ROOT/dir_root_30", true).is_ignore());
assert!(m("ROOT/dir_root_30/file", false).is_ignore());
assert!(m("ROOT/dir_root_30/child_dir", true).is_ignore());
assert!(m("ROOT/dir_root_30/child_dir/file", false).is_ignore());
// 31
assert!(m("ROOT/dir_root_31", true).is_ignore());
assert!(m("ROOT/dir_root_31/file", false).is_ignore());
assert!(m("ROOT/dir_root_31/child_dir", true).is_ignore());
assert!(m("ROOT/dir_root_31/child_dir/file", false).is_ignore());
// 32
assert!(m("ROOT/dir_root_32", true).is_none()); // dir itself doesn't match
assert!(m("ROOT/dir_root_32/file", false).is_ignore());
assert!(m("ROOT/dir_root_32/child_dir", true).is_ignore());
assert!(m("ROOT/dir_root_32/child_dir/file", false).is_ignore());
// 33
assert!(m("ROOT/dir_root_33", true).is_none()); // dir itself doesn't match
assert!(m("ROOT/dir_root_33/file", false).is_ignore());
assert!(m("ROOT/dir_root_33/child_dir", true).is_ignore());
assert!(m("ROOT/dir_root_33/child_dir/file", false).is_ignore());
}
#[test]
fn test_dirs_in_deep() {
let gitignore = get_gitignore();
let m = |path: &str, is_dir: bool| {
gitignore.matched_path_or_any_parents(Path::new(path), is_dir)
};
// 00
assert!(m("ROOT/parent_dir/dir_deep_00", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_00/file", false).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_00/child_dir", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_00/child_dir/file", false).is_ignore());
// 01
assert!(m("ROOT/parent_dir/dir_deep_01", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_01/file", false).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_01/child_dir", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_01/child_dir/file", false).is_ignore());
// 02
assert!(m("ROOT/parent_dir/dir_deep_02", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_02/file", false).is_none());
assert!(m("ROOT/parent_dir/dir_deep_02/child_dir", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_02/child_dir/file", false).is_none());
// 03
assert!(m("ROOT/parent_dir/dir_deep_03", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_03/file", false).is_none());
assert!(m("ROOT/parent_dir/dir_deep_03/child_dir", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_03/child_dir/file", false).is_none());
// 10
assert!(m("ROOT/parent_dir/dir_deep_10", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_10/file", false).is_none());
assert!(m("ROOT/parent_dir/dir_deep_10/child_dir", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_10/child_dir/file", false).is_none());
// 11
assert!(m("ROOT/parent_dir/dir_deep_11", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_11/file", false).is_none());
assert!(m("ROOT/parent_dir/dir_deep_11/child_dir", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_11/child_dir/file", false).is_none());
// 12
assert!(m("ROOT/parent_dir/dir_deep_12", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_12/file", false).is_none());
assert!(m("ROOT/parent_dir/dir_deep_12/child_dir", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_12/child_dir/file", false).is_none());
// 13
assert!(m("ROOT/parent_dir/dir_deep_13", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_13/file", false).is_none());
assert!(m("ROOT/parent_dir/dir_deep_13/child_dir", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_13/child_dir/file", false).is_none());
// 20
assert!(m("ROOT/parent_dir/dir_deep_20", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_20/file", false).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_20/child_dir", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_20/child_dir/file", false).is_ignore());
// 21
assert!(m("ROOT/parent_dir/dir_deep_21", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_21/file", false).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_21/child_dir", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_21/child_dir/file", false).is_ignore());
// 22
// dir itself doesn't match
assert!(m("ROOT/parent_dir/dir_deep_22", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_22/file", false).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_22/child_dir", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_22/child_dir/file", false).is_ignore());
// 23
// dir itself doesn't match
assert!(m("ROOT/parent_dir/dir_deep_23", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_23/file", false).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_23/child_dir", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_23/child_dir/file", false).is_ignore());
// 30
assert!(m("ROOT/parent_dir/dir_deep_30", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_30/file", false).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_30/child_dir", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_30/child_dir/file", false).is_ignore());
// 31
assert!(m("ROOT/parent_dir/dir_deep_31", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_31/file", false).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_31/child_dir", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_31/child_dir/file", false).is_ignore());
// 32
// dir itself doesn't match
assert!(m("ROOT/parent_dir/dir_deep_32", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_32/file", false).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_32/child_dir", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_32/child_dir/file", false).is_ignore());
// 33
// dir itself doesn't match
assert!(m("ROOT/parent_dir/dir_deep_33", true).is_none());
assert!(m("ROOT/parent_dir/dir_deep_33/file", false).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_33/child_dir", true).is_ignore());
assert!(m("ROOT/parent_dir/dir_deep_33/child_dir/file", false).is_ignore());
}

24
crates/matcher/Cargo.toml Normal file
View File

@@ -0,0 +1,24 @@
[package]
name = "grep-matcher"
version = "0.1.3" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
A trait for regular expressions, with a focus on line oriented search.
"""
documentation = "https://docs.rs/grep-matcher"
homepage = "https://github.com/BurntSushi/ripgrep"
repository = "https://github.com/BurntSushi/ripgrep"
readme = "README.md"
keywords = ["regex", "pattern", "trait"]
license = "Unlicense/MIT"
autotests = false
[dependencies]
memchr = "2.1"
[dev-dependencies]
regex = "1.1"
[[test]]
name = "integration"
path = "tests/tests.rs"

21
crates/matcher/LICENSE-MIT Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Andrew Gallant
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

36
crates/matcher/README.md Normal file
View File

@@ -0,0 +1,36 @@
grep-matcher
------------
This crate provides a low level interface for describing regular expression
matchers. The `grep` crate uses this interface in order to make the regex
engine it uses pluggable.
[![Linux build status](https://api.travis-ci.org/BurntSushi/ripgrep.svg)](https://travis-ci.org/BurntSushi/ripgrep)
[![Windows build status](https://ci.appveyor.com/api/projects/status/github/BurntSushi/ripgrep?svg=true)](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[![](https://img.shields.io/crates/v/grep-matcher.svg)](https://crates.io/crates/grep-matcher)
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
### Documentation
[https://docs.rs/grep-matcher](https://docs.rs/grep-matcher)
**NOTE:** You probably don't want to use this crate directly. Instead, you
should prefer the facade defined in the
[`grep`](https://docs.rs/grep)
crate.
### Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
grep-matcher = "0.1"
```
and this to your crate root:
```rust
extern crate grep_matcher;
```

24
crates/matcher/UNLICENSE Normal file
View File

@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>

328
crates/matcher/src/interpolate.rs Normal file
View File

@@ -0,0 +1,328 @@
use std::str;
use memchr::memchr;
/// Interpolate capture references in `replacement` and write the interpolation
/// result to `dst`. References in `replacement` take the form of $N or $name,
/// where `N` is a capture group index and `name` is a capture group name. The
/// function provided, `name_to_index`, maps capture group names to indices.
///
/// The `append` function given is responsible for writing the replacement
/// to the `dst` buffer. That is, it is called with the capture group index
/// of a capture group reference and is expected to resolve the index to its
/// corresponding matched text. If no such match exists, then `append` should
/// not write anything to its given buffer.
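///
/// # Example
///
/// A minimal sketch of driving this function by hand. The closures here
/// are hypothetical; real callers typically forward to a `Captures`
/// implementation:
///
/// ```ignore
/// let mut dst = vec![];
/// interpolate(
///     b"$1 met $name",
///     |i, dst| if i == 1 { dst.extend(b"homer") },
///     |name| if name == "name" { Some(2) } else { None },
///     &mut dst,
/// );
/// // Group 2 resolves to an index but has no matched text, so `$name`
/// // expands to nothing:
/// assert_eq!(dst, b"homer met ");
/// ```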
pub fn interpolate<A, N>(
mut replacement: &[u8],
mut append: A,
mut name_to_index: N,
dst: &mut Vec<u8>,
) where
A: FnMut(usize, &mut Vec<u8>),
N: FnMut(&str) -> Option<usize>,
{
while !replacement.is_empty() {
match memchr(b'$', replacement) {
None => break,
Some(i) => {
dst.extend(&replacement[..i]);
replacement = &replacement[i..];
}
}
if replacement.get(1).map_or(false, |&b| b == b'$') {
dst.push(b'$');
replacement = &replacement[2..];
continue;
}
debug_assert!(!replacement.is_empty());
let cap_ref = match find_cap_ref(replacement) {
Some(cap_ref) => cap_ref,
None => {
dst.push(b'$');
replacement = &replacement[1..];
continue;
}
};
replacement = &replacement[cap_ref.end..];
match cap_ref.cap {
Ref::Number(i) => append(i, dst),
Ref::Named(name) => {
if let Some(i) = name_to_index(name) {
append(i, dst);
}
}
}
}
dst.extend(replacement);
}
/// `CaptureRef` represents a reference to a capture group inside some text.
/// The reference is either a capture group name or a number.
///
/// It is also tagged with the position in the text immediately following the
/// capture reference.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct CaptureRef<'a> {
cap: Ref<'a>,
end: usize,
}
/// A reference to a capture group in some text.
///
/// e.g., `$2`, `$foo`, `${foo}`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum Ref<'a> {
Named(&'a str),
Number(usize),
}
impl<'a> From<&'a str> for Ref<'a> {
fn from(x: &'a str) -> Ref<'a> {
Ref::Named(x)
}
}
impl From<usize> for Ref<'static> {
fn from(x: usize) -> Ref<'static> {
Ref::Number(x)
}
}
/// Parses a possible reference to a capture group name in the given text,
/// starting at the beginning of `replacement`.
///
/// If no such valid reference could be found, None is returned.
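///
/// For example (mirroring the `find!` tests below), `find_cap_ref(b"$foo")`
/// yields `Some(CaptureRef { cap: Ref::Named("foo"), end: 4 })`, while
/// `find_cap_ref(b"$")` yields `None`.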
fn find_cap_ref(replacement: &[u8]) -> Option<CaptureRef> {
let mut i = 0;
if replacement.len() <= 1 || replacement[0] != b'$' {
return None;
}
let mut brace = false;
i += 1;
if replacement[i] == b'{' {
brace = true;
i += 1;
}
let mut cap_end = i;
while replacement.get(cap_end).map_or(false, is_valid_cap_letter) {
cap_end += 1;
}
if cap_end == i {
return None;
}
// We just verified that the range i..cap_end is valid ASCII, so it must
// therefore be valid UTF-8. If we really cared, we could avoid this UTF-8
// check with an unchecked conversion or by parsing the number straight
// from &[u8].
let cap = str::from_utf8(&replacement[i..cap_end])
.expect("valid UTF-8 capture name");
if brace {
if !replacement.get(cap_end).map_or(false, |&b| b == b'}') {
return None;
}
cap_end += 1;
}
Some(CaptureRef {
cap: match cap.parse::<u32>() {
Ok(i) => Ref::Number(i as usize),
Err(_) => Ref::Named(cap),
},
end: cap_end,
})
}
/// Returns true if and only if the given byte is allowed in a capture name.
fn is_valid_cap_letter(b: &u8) -> bool {
match *b {
b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' | b'_' => true,
_ => false,
}
}
#[cfg(test)]
mod tests {
use super::{find_cap_ref, interpolate, CaptureRef};
macro_rules! find {
($name:ident, $text:expr) => {
#[test]
fn $name() {
assert_eq!(None, find_cap_ref($text.as_bytes()));
}
};
($name:ident, $text:expr, $capref:expr) => {
#[test]
fn $name() {
assert_eq!(Some($capref), find_cap_ref($text.as_bytes()));
}
};
}
macro_rules! c {
($name_or_number:expr, $pos:expr) => {
CaptureRef { cap: $name_or_number.into(), end: $pos }
};
}
find!(find_cap_ref1, "$foo", c!("foo", 4));
find!(find_cap_ref2, "${foo}", c!("foo", 6));
find!(find_cap_ref3, "$0", c!(0, 2));
find!(find_cap_ref4, "$5", c!(5, 2));
find!(find_cap_ref5, "$10", c!(10, 3));
find!(find_cap_ref6, "$42a", c!("42a", 4));
find!(find_cap_ref7, "${42}a", c!(42, 5));
find!(find_cap_ref8, "${42");
find!(find_cap_ref9, "${42 ");
find!(find_cap_ref10, " $0 ");
find!(find_cap_ref11, "$");
find!(find_cap_ref12, " ");
find!(find_cap_ref13, "");
// A convenience routine for using interpolate's unwieldy but flexible API.
fn interpolate_string(
mut name_to_index: Vec<(&'static str, usize)>,
caps: Vec<&'static str>,
replacement: &str,
) -> String {
name_to_index.sort_by_key(|x| x.0);
let mut dst = vec![];
interpolate(
replacement.as_bytes(),
|i, dst| {
if let Some(&s) = caps.get(i) {
dst.extend(s.as_bytes());
}
},
|name| -> Option<usize> {
name_to_index
.binary_search_by_key(&name, |x| x.0)
.ok()
.map(|i| name_to_index[i].1)
},
&mut dst,
);
String::from_utf8(dst).unwrap()
}
macro_rules! interp {
($name:ident, $map:expr, $caps:expr, $hay:expr, $expected:expr $(,)*) => {
#[test]
fn $name() {
assert_eq!($expected, interpolate_string($map, $caps, $hay));
}
};
}
interp!(
interp1,
vec![("foo", 2)],
vec!["", "", "xxx"],
"test $foo test",
"test xxx test",
);
interp!(
interp2,
vec![("foo", 2)],
vec!["", "", "xxx"],
"test$footest",
"test",
);
interp!(
interp3,
vec![("foo", 2)],
vec!["", "", "xxx"],
"test${foo}test",
"testxxxtest",
);
interp!(
interp4,
vec![("foo", 2)],
vec!["", "", "xxx"],
"test$2test",
"test",
);
interp!(
interp5,
vec![("foo", 2)],
vec!["", "", "xxx"],
"test${2}test",
"testxxxtest",
);
interp!(
interp6,
vec![("foo", 2)],
vec!["", "", "xxx"],
"test $$foo test",
"test $foo test",
);
interp!(
interp7,
vec![("foo", 2)],
vec!["", "", "xxx"],
"test $foo",
"test xxx",
);
interp!(
interp8,
vec![("foo", 2)],
vec!["", "", "xxx"],
"$foo test",
"xxx test",
);
interp!(
interp9,
vec![("bar", 1), ("foo", 2)],
vec!["", "yyy", "xxx"],
"test $bar$foo",
"test yyyxxx",
);
interp!(
interp10,
vec![("bar", 1), ("foo", 2)],
vec!["", "yyy", "xxx"],
"test $ test",
"test $ test",
);
interp!(
interp11,
vec![("bar", 1), ("foo", 2)],
vec!["", "yyy", "xxx"],
"test ${} test",
"test ${} test",
);
interp!(
interp12,
vec![("bar", 1), ("foo", 2)],
vec!["", "yyy", "xxx"],
"test ${ } test",
"test ${ } test",
);
interp!(
interp13,
vec![("bar", 1), ("foo", 2)],
vec!["", "yyy", "xxx"],
"test ${a b} test",
"test ${a b} test",
);
interp!(
interp14,
vec![("bar", 1), ("foo", 2)],
vec!["", "yyy", "xxx"],
"test ${a} test",
"test test",
);
}

1151
crates/matcher/src/lib.rs Normal file

File diff suppressed because it is too large

230
crates/matcher/tests/test_matcher.rs Normal file
View File

@@ -0,0 +1,230 @@
use grep_matcher::{Captures, Match, Matcher};
use regex::bytes::Regex;
use util::{RegexMatcher, RegexMatcherNoCaps};
fn matcher(pattern: &str) -> RegexMatcher {
RegexMatcher::new(Regex::new(pattern).unwrap())
}
fn matcher_no_caps(pattern: &str) -> RegexMatcherNoCaps {
RegexMatcherNoCaps(Regex::new(pattern).unwrap())
}
fn m(start: usize, end: usize) -> Match {
Match::new(start, end)
}
#[test]
fn find() {
let matcher = matcher(r"(\w+)\s+(\w+)");
assert_eq!(matcher.find(b" homer simpson ").unwrap(), Some(m(1, 14)));
}
#[test]
fn find_iter() {
let matcher = matcher(r"(\w+)\s+(\w+)");
let mut matches = vec![];
matcher
.find_iter(b"aa bb cc dd", |m| {
matches.push(m);
true
})
.unwrap();
assert_eq!(matches, vec![m(0, 5), m(6, 11)]);
// Test that find_iter respects short circuiting.
matches.clear();
matcher
.find_iter(b"aa bb cc dd", |m| {
matches.push(m);
false
})
.unwrap();
assert_eq!(matches, vec![m(0, 5)]);
}
#[test]
fn try_find_iter() {
#[derive(Clone, Debug, Eq, PartialEq)]
struct MyError;
let matcher = matcher(r"(\w+)\s+(\w+)");
let mut matches = vec![];
let err = matcher
.try_find_iter(b"aa bb cc dd", |m| {
if matches.is_empty() {
matches.push(m);
Ok(true)
} else {
Err(MyError)
}
})
.unwrap()
.unwrap_err();
assert_eq!(matches, vec![m(0, 5)]);
assert_eq!(err, MyError);
}
#[test]
fn shortest_match() {
let matcher = matcher(r"a+");
// This tests that the default impl isn't doing anything smart, and simply
// defers to `find`.
assert_eq!(matcher.shortest_match(b"aaa").unwrap(), Some(3));
// The actual underlying regex is smarter.
assert_eq!(matcher.re.shortest_match(b"aaa"), Some(1));
}
#[test]
fn captures() {
let matcher = matcher(r"(?P<a>\w+)\s+(?P<b>\w+)");
assert_eq!(matcher.capture_count(), 3);
assert_eq!(matcher.capture_index("a"), Some(1));
assert_eq!(matcher.capture_index("b"), Some(2));
assert_eq!(matcher.capture_index("nada"), None);
let mut caps = matcher.new_captures().unwrap();
assert!(matcher.captures(b" homer simpson ", &mut caps).unwrap());
assert_eq!(caps.get(0), Some(m(1, 14)));
assert_eq!(caps.get(1), Some(m(1, 6)));
assert_eq!(caps.get(2), Some(m(7, 14)));
}
#[test]
fn captures_iter() {
let matcher = matcher(r"(?P<a>\w+)\s+(?P<b>\w+)");
let mut caps = matcher.new_captures().unwrap();
let mut matches = vec![];
matcher
.captures_iter(b"aa bb cc dd", &mut caps, |caps| {
matches.push(caps.get(0).unwrap());
matches.push(caps.get(1).unwrap());
matches.push(caps.get(2).unwrap());
true
})
.unwrap();
assert_eq!(
matches,
vec![m(0, 5), m(0, 2), m(3, 5), m(6, 11), m(6, 8), m(9, 11),]
);
// Test that captures_iter respects short circuiting.
matches.clear();
matcher
.captures_iter(b"aa bb cc dd", &mut caps, |caps| {
matches.push(caps.get(0).unwrap());
matches.push(caps.get(1).unwrap());
matches.push(caps.get(2).unwrap());
false
})
.unwrap();
assert_eq!(matches, vec![m(0, 5), m(0, 2), m(3, 5),]);
}
#[test]
fn try_captures_iter() {
#[derive(Clone, Debug, Eq, PartialEq)]
struct MyError;
let matcher = matcher(r"(?P<a>\w+)\s+(?P<b>\w+)");
let mut caps = matcher.new_captures().unwrap();
let mut matches = vec![];
let err = matcher
.try_captures_iter(b"aa bb cc dd", &mut caps, |caps| {
if matches.is_empty() {
matches.push(caps.get(0).unwrap());
matches.push(caps.get(1).unwrap());
matches.push(caps.get(2).unwrap());
Ok(true)
} else {
Err(MyError)
}
})
.unwrap()
.unwrap_err();
assert_eq!(matches, vec![m(0, 5), m(0, 2), m(3, 5)]);
assert_eq!(err, MyError);
}
// Test that our default impls for capturing are correct. Namely, when
// capturing isn't supported by the underlying matcher, then all of the
// various capturing related APIs fail fast.
#[test]
fn no_captures() {
let matcher = matcher_no_caps(r"(?P<a>\w+)\s+(?P<b>\w+)");
assert_eq!(matcher.capture_count(), 0);
assert_eq!(matcher.capture_index("a"), None);
assert_eq!(matcher.capture_index("b"), None);
assert_eq!(matcher.capture_index("nada"), None);
let mut caps = matcher.new_captures().unwrap();
assert!(!matcher.captures(b"homer simpson", &mut caps).unwrap());
let mut called = false;
matcher
.captures_iter(b"homer simpson", &mut caps, |_| {
called = true;
true
})
.unwrap();
assert!(!called);
}
#[test]
fn replace() {
let matcher = matcher(r"(\w+)\s+(\w+)");
let mut dst = vec![];
matcher
.replace(b"aa bb cc dd", &mut dst, |_, dst| {
dst.push(b'z');
true
})
.unwrap();
assert_eq!(dst, b"z z");
// Test that replacements respect short circuiting.
dst.clear();
matcher
.replace(b"aa bb cc dd", &mut dst, |_, dst| {
dst.push(b'z');
false
})
.unwrap();
assert_eq!(dst, b"z cc dd");
}
#[test]
fn replace_with_captures() {
let matcher = matcher(r"(\w+)\s+(\w+)");
let haystack = b"aa bb cc dd";
let mut caps = matcher.new_captures().unwrap();
let mut dst = vec![];
matcher
.replace_with_captures(haystack, &mut caps, &mut dst, |caps, dst| {
caps.interpolate(
|name| matcher.capture_index(name),
haystack,
b"$2 $1",
dst,
);
true
})
.unwrap();
assert_eq!(dst, b"bb aa dd cc");
// Test that replacements respect short circuiting.
dst.clear();
matcher
.replace_with_captures(haystack, &mut caps, &mut dst, |caps, dst| {
caps.interpolate(
|name| matcher.capture_index(name),
haystack,
b"$2 $1",
dst,
);
false
})
.unwrap();
assert_eq!(dst, b"bb aa cc dd");
}

6
crates/matcher/tests/tests.rs Normal file
View File

@@ -0,0 +1,6 @@
extern crate grep_matcher;
extern crate regex;
mod util;
mod test_matcher;

95
crates/matcher/tests/util.rs Normal file
View File

@@ -0,0 +1,95 @@
use std::collections::HashMap;
use std::result;
use grep_matcher::{Captures, Match, Matcher, NoCaptures, NoError};
use regex::bytes::{CaptureLocations, Regex};
#[derive(Debug)]
pub struct RegexMatcher {
pub re: Regex,
pub names: HashMap<String, usize>,
}
impl RegexMatcher {
pub fn new(re: Regex) -> RegexMatcher {
let mut names = HashMap::new();
for (i, optional_name) in re.capture_names().enumerate() {
if let Some(name) = optional_name {
names.insert(name.to_string(), i);
}
}
RegexMatcher { re: re, names: names }
}
}
type Result<T> = result::Result<T, NoError>;
impl Matcher for RegexMatcher {
type Captures = RegexCaptures;
type Error = NoError;
fn find_at(&self, haystack: &[u8], at: usize) -> Result<Option<Match>> {
Ok(self
.re
.find_at(haystack, at)
.map(|m| Match::new(m.start(), m.end())))
}
fn new_captures(&self) -> Result<RegexCaptures> {
Ok(RegexCaptures(self.re.capture_locations()))
}
fn captures_at(
&self,
haystack: &[u8],
at: usize,
caps: &mut RegexCaptures,
) -> Result<bool> {
Ok(self.re.captures_read_at(&mut caps.0, haystack, at).is_some())
}
fn capture_count(&self) -> usize {
self.re.captures_len()
}
fn capture_index(&self, name: &str) -> Option<usize> {
self.names.get(name).map(|i| *i)
}
// We purposely don't implement any other methods, so that we test the
// default impls. The "real" Regex impl for Matcher provides a few more
// impls. e.g., Its `find_iter` impl is faster than what we can do here,
// since the regex crate avoids synchronization overhead.
}
#[derive(Debug)]
pub struct RegexMatcherNoCaps(pub Regex);
impl Matcher for RegexMatcherNoCaps {
type Captures = NoCaptures;
type Error = NoError;
fn find_at(&self, haystack: &[u8], at: usize) -> Result<Option<Match>> {
Ok(self
.0
.find_at(haystack, at)
.map(|m| Match::new(m.start(), m.end())))
}
fn new_captures(&self) -> Result<NoCaptures> {
Ok(NoCaptures::new())
}
}
#[derive(Clone, Debug)]
pub struct RegexCaptures(CaptureLocations);
impl Captures for RegexCaptures {
fn len(&self) -> usize {
self.0.len()
}
fn get(&self, i: usize) -> Option<Match> {
self.0.pos(i).map(|(s, e)| Match::new(s, e))
}
}

17
crates/pcre2/Cargo.toml Normal file
View File

@@ -0,0 +1,17 @@
[package]
name = "grep-pcre2"
version = "0.1.3" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
Use PCRE2 with the 'grep' crate.
"""
documentation = "https://docs.rs/grep-pcre2"
homepage = "https://github.com/BurntSushi/ripgrep"
repository = "https://github.com/BurntSushi/ripgrep"
readme = "README.md"
keywords = ["regex", "grep", "pcre", "backreference", "look"]
license = "Unlicense/MIT"
[dependencies]
grep-matcher = { version = "0.1.2", path = "../grep-matcher" }
pcre2 = "0.2.0"

21
crates/pcre2/LICENSE-MIT Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Andrew Gallant
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

39
crates/pcre2/README.md Normal file
View File

@@ -0,0 +1,39 @@
grep-pcre2
----------
The `grep-pcre2` crate provides an implementation of the `Matcher` trait from
the `grep-matcher` crate. This implementation permits PCRE2 to be used in the
`grep` crate for fast line oriented searching.
[![Linux build status](https://api.travis-ci.org/BurntSushi/ripgrep.svg)](https://travis-ci.org/BurntSushi/ripgrep)
[![Windows build status](https://ci.appveyor.com/api/projects/status/github/BurntSushi/ripgrep?svg=true)](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[![](https://img.shields.io/crates/v/grep-pcre2.svg)](https://crates.io/crates/grep-pcre2)
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
### Documentation
[https://docs.rs/grep-pcre2](https://docs.rs/grep-pcre2)
**NOTE:** You probably don't want to use this crate directly. Instead, you
should prefer the facade defined in the
[`grep`](https://docs.rs/grep)
crate.
If you're looking to just use PCRE2 from Rust, then you probably want the
[`pcre2`](https://docs.rs/pcre2)
crate, which provides high level safe bindings to PCRE2.
### Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
grep-pcre2 = "0.1"
```
and this to your crate root:
```rust
extern crate grep_pcre2;
```

24
crates/pcre2/UNLICENSE Normal file
View File

@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>

59
crates/pcre2/src/error.rs Normal file
View File

@@ -0,0 +1,59 @@
use std::error;
use std::fmt;
/// An error that can occur in this crate.
///
/// Generally, this error corresponds to problems building a regular
/// expression, whether it's in parsing, compilation or a problem with
/// guaranteeing a configured optimization.
#[derive(Clone, Debug)]
pub struct Error {
kind: ErrorKind,
}
impl Error {
pub(crate) fn regex<E: error::Error>(err: E) -> Error {
Error { kind: ErrorKind::Regex(err.to_string()) }
}
/// Return the kind of this error.
pub fn kind(&self) -> &ErrorKind {
&self.kind
}
}
/// The kind of an error that can occur.
#[derive(Clone, Debug)]
pub enum ErrorKind {
/// An error that occurred as a result of parsing a regular expression.
/// This can be a syntax error or an error that results from attempting to
/// compile a regular expression that is too big.
///
/// The string here is the underlying error converted to a string.
Regex(String),
/// Hints that destructuring should not be exhaustive.
///
/// This enum may grow additional variants, so this makes sure clients
/// don't count on exhaustive matching. (Otherwise, adding a new variant
/// could break existing code.)
#[doc(hidden)]
__Nonexhaustive,
}
impl error::Error for Error {
fn description(&self) -> &str {
match self.kind {
ErrorKind::Regex(_) => "regex error",
ErrorKind::__Nonexhaustive => unreachable!(),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.kind {
ErrorKind::Regex(ref s) => write!(f, "{}", s),
ErrorKind::__Nonexhaustive => unreachable!(),
}
}
}

16
crates/pcre2/src/lib.rs Normal file
View File

@@ -0,0 +1,16 @@
/*!
An implementation of `grep-matcher`'s `Matcher` trait for
[PCRE2](https://www.pcre.org/).
*/
#![deny(missing_docs)]
extern crate grep_matcher;
extern crate pcre2;
pub use error::{Error, ErrorKind};
pub use matcher::{RegexCaptures, RegexMatcher, RegexMatcherBuilder};
pub use pcre2::{is_jit_available, version};
mod error;
mod matcher;

456
crates/pcre2/src/matcher.rs Normal file
View File

@@ -0,0 +1,456 @@
use std::collections::HashMap;
use grep_matcher::{Captures, Match, Matcher};
use pcre2::bytes::{CaptureLocations, Regex, RegexBuilder};
use error::Error;
/// A builder for configuring the compilation of a PCRE2 regex.
#[derive(Clone, Debug)]
pub struct RegexMatcherBuilder {
builder: RegexBuilder,
case_smart: bool,
word: bool,
}
impl RegexMatcherBuilder {
/// Create a new matcher builder with a default configuration.
pub fn new() -> RegexMatcherBuilder {
RegexMatcherBuilder {
builder: RegexBuilder::new(),
case_smart: false,
word: false,
}
}
/// Compile the given pattern into a PCRE matcher using the current
/// configuration.
///
/// If there was a problem compiling the pattern, then an error is
/// returned.
pub fn build(&self, pattern: &str) -> Result<RegexMatcher, Error> {
let mut builder = self.builder.clone();
if self.case_smart && !has_uppercase_literal(pattern) {
builder.caseless(true);
}
let res = if self.word {
let pattern = format!(r"(?<!\w)(?:{})(?!\w)", pattern);
builder.build(&pattern)
} else {
builder.build(pattern)
};
res.map_err(Error::regex).map(|regex| {
let mut names = HashMap::new();
for (i, name) in regex.capture_names().iter().enumerate() {
if let Some(ref name) = *name {
names.insert(name.to_string(), i);
}
}
RegexMatcher { regex, names }
})
}
/// Enables case insensitive matching.
///
/// If the `utf` option is also set, then Unicode case folding is used
/// to determine case insensitivity. When the `utf` option is not set,
/// then only standard ASCII case insensitivity is considered.
///
/// This option corresponds to the `i` flag.
pub fn caseless(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.builder.caseless(yes);
self
}
/// Whether to enable "smart case" or not.
///
/// When smart case is enabled, the builder will automatically enable
/// case insensitive matching based on how the pattern is written. Namely,
/// case insensitive mode is enabled when both of the following things
/// are believed to be true:
///
/// 1. The pattern contains at least one literal character. For example,
/// `a\w` contains a literal (`a`) but `\w` does not.
/// 2. Of the literals in the pattern, none of them are considered to be
/// uppercase according to Unicode. For example, `foo\pL` has no
/// uppercase literals but `Foo\pL` does.
///
/// Note that the implementation of this is not perfect. Namely, `\p{Ll}`
/// will prevent case insensitive matching even though it is part of a meta
/// sequence. This bug will probably never be fixed.
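///
/// For example (as exercised by the `case_smart` test below), the pattern
/// `abc` built with smart case enabled will match `ABC`, while `aBc` will
/// not, since it contains an uppercase literal.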
pub fn case_smart(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.case_smart = yes;
self
}
/// Enables "dot all" matching.
///
/// When enabled, the `.` metacharacter in the pattern matches any
/// character, including `\n`. When disabled (the default), `.` will match
/// any character except for `\n`.
///
/// This option corresponds to the `s` flag.
pub fn dotall(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.builder.dotall(yes);
self
}
/// Enable "extended" mode in the pattern, where whitespace is ignored.
///
/// This option corresponds to the `x` flag.
pub fn extended(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.builder.extended(yes);
self
}
/// Enable multiline matching mode.
///
/// When enabled, the `^` and `$` anchors will match both at the beginning
/// and end of a subject string, in addition to matching at the start of
/// a line and the end of a line. When disabled, the `^` and `$` anchors
/// will only match at the beginning and end of a subject string.
///
/// This option corresponds to the `m` flag.
pub fn multi_line(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.builder.multi_line(yes);
self
}
/// Enable matching of CRLF as a line terminator.
///
/// When enabled, anchors such as `^` and `$` will match any of the
/// following as a line terminator: `\r`, `\n` or `\r\n`.
///
/// This is disabled by default, in which case, only `\n` is recognized as
/// a line terminator.
pub fn crlf(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.builder.crlf(yes);
self
}
/// Require that all matches occur on word boundaries.
///
/// Enabling this option is subtly different than putting `\b` assertions
/// on both sides of your pattern. In particular, a `\b` assertion requires
/// that one side of it match a word character while the other match a
/// non-word character. This option, in contrast, merely requires that
/// one side match a non-word character.
///
/// For example, `\b-2\b` will not match `foo -2 bar` since `-` is not a
/// word character. However, `-2` with this `word` option enabled will
/// match the `-2` in `foo -2 bar`.
pub fn word(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.word = yes;
self
}
/// Enable Unicode matching mode.
///
/// When enabled, the following patterns become Unicode aware: `\b`, `\B`,
/// `\d`, `\D`, `\s`, `\S`, `\w`, `\W`.
///
/// When set, this implies UTF matching mode. It is not possible to enable
/// Unicode matching mode without enabling UTF matching mode.
///
/// This is disabled by default.
pub fn ucp(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.builder.ucp(yes);
self
}
/// Enable UTF matching mode.
///
/// When enabled, characters are treated as sequences of code units that
/// make up a single codepoint instead of as single bytes. For example,
/// this will cause `.` to match any single UTF-8 encoded codepoint, whereas
/// when this is disabled, `.` will match any single byte (except for `\n` in
/// both cases, unless "dot all" mode is enabled).
///
/// Note that when UTF matching mode is enabled, every search performed
/// will do a UTF-8 validation check, which can impact performance. The
/// UTF-8 check can be disabled via the `disable_utf_check` option, but it
/// is undefined behavior to enable UTF matching mode and search invalid
/// UTF-8.
///
/// This is disabled by default.
pub fn utf(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.builder.utf(yes);
self
}
/// When UTF matching mode is enabled, this will disable the UTF checking
/// that PCRE2 will normally perform automatically. If UTF matching mode
/// is not enabled, then this has no effect.
///
/// UTF checking is enabled by default when UTF matching mode is enabled.
/// If UTF matching mode is enabled and UTF checking is enabled, then PCRE2
/// will return an error if you attempt to search a subject string that is
/// not valid UTF-8.
///
/// # Safety
///
/// It is undefined behavior to disable the UTF check in UTF matching mode
/// and search a subject string that is not valid UTF-8. When the UTF check
/// is disabled, callers must guarantee that the subject string is valid
/// UTF-8.
pub unsafe fn disable_utf_check(&mut self) -> &mut RegexMatcherBuilder {
self.builder.disable_utf_check();
self
}
/// Enable PCRE2's JIT and return an error if it's not available.
///
/// This generally speeds up matching quite a bit. The downside is that it
/// can increase the time it takes to compile a pattern.
///
/// If the JIT isn't available or if JIT compilation returns an error, then
/// regex compilation will fail with the corresponding error.
///
/// This is disabled by default, and always overrides `jit_if_available`.
pub fn jit(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.builder.jit(yes);
self
}
/// Enable PCRE2's JIT if it's available.
///
/// This generally speeds up matching quite a bit. The downside is that it
/// can increase the time it takes to compile a pattern.
///
/// If the JIT isn't available or if JIT compilation returns an error,
/// then a debug message with the error will be emitted and the regex will
/// otherwise silently fall back to non-JIT matching.
///
/// This is disabled by default, and always overrides `jit`.
pub fn jit_if_available(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.builder.jit_if_available(yes);
self
}
/// Set the maximum size of PCRE2's JIT stack, in bytes. If the JIT is
/// not enabled, then this has no effect.
///
/// When `None` is given, no custom JIT stack will be created, and instead,
/// the default JIT stack is used. When the default is used, its maximum
/// size is 32 KB.
///
/// When this is set, then a new JIT stack will be created with the given
/// maximum size as its limit.
///
/// Increasing the stack size can be useful for larger regular expressions.
///
/// By default, this is set to `None`.
pub fn max_jit_stack_size(
&mut self,
bytes: Option<usize>,
) -> &mut RegexMatcherBuilder {
self.builder.max_jit_stack_size(bytes);
self
}
}
/// An implementation of the `Matcher` trait using PCRE2.
#[derive(Clone, Debug)]
pub struct RegexMatcher {
regex: Regex,
names: HashMap<String, usize>,
}
impl RegexMatcher {
/// Create a new matcher from the given pattern using the default
/// configuration.
pub fn new(pattern: &str) -> Result<RegexMatcher, Error> {
RegexMatcherBuilder::new().build(pattern)
}
}
impl Matcher for RegexMatcher {
type Captures = RegexCaptures;
type Error = Error;
fn find_at(
&self,
haystack: &[u8],
at: usize,
) -> Result<Option<Match>, Error> {
Ok(self
.regex
.find_at(haystack, at)
.map_err(Error::regex)?
.map(|m| Match::new(m.start(), m.end())))
}
fn new_captures(&self) -> Result<RegexCaptures, Error> {
Ok(RegexCaptures::new(self.regex.capture_locations()))
}
fn capture_count(&self) -> usize {
self.regex.captures_len()
}
fn capture_index(&self, name: &str) -> Option<usize> {
self.names.get(name).map(|i| *i)
}
fn try_find_iter<F, E>(
&self,
haystack: &[u8],
mut matched: F,
) -> Result<Result<(), E>, Error>
where
F: FnMut(Match) -> Result<bool, E>,
{
for result in self.regex.find_iter(haystack) {
let m = result.map_err(Error::regex)?;
match matched(Match::new(m.start(), m.end())) {
Ok(true) => continue,
Ok(false) => return Ok(Ok(())),
Err(err) => return Ok(Err(err)),
}
}
Ok(Ok(()))
}
fn captures_at(
&self,
haystack: &[u8],
at: usize,
caps: &mut RegexCaptures,
) -> Result<bool, Error> {
Ok(self
.regex
.captures_read_at(&mut caps.locs, haystack, at)
.map_err(Error::regex)?
.is_some())
}
}
/// Represents the match offsets of each capturing group in a match.
///
/// The first, or `0`th capture group, always corresponds to the entire match
/// and is guaranteed to be present when a match occurs. The next capture
/// group, at index `1`, corresponds to the first capturing group in the regex,
/// ordered by the position at which the left opening parenthesis occurs.
///
/// Note that not all capturing groups are guaranteed to be present in a match.
/// For example, in the regex, `(?P<foo>\w)|(?P<bar>\W)`, only one of `foo`
/// or `bar` will ever be set in any given match.
///
/// In order to access a capture group by name, you'll need to first find the
/// index of the group using the corresponding matcher's `capture_index`
/// method, and then use that index with `RegexCaptures::get`.
#[derive(Clone, Debug)]
pub struct RegexCaptures {
/// Where the locations are stored.
locs: CaptureLocations,
}
impl Captures for RegexCaptures {
fn len(&self) -> usize {
self.locs.len()
}
fn get(&self, i: usize) -> Option<Match> {
self.locs.get(i).map(|(s, e)| Match::new(s, e))
}
}
impl RegexCaptures {
pub(crate) fn new(locs: CaptureLocations) -> RegexCaptures {
RegexCaptures { locs }
}
}
/// Determine whether the pattern contains an uppercase character which should
/// negate the effect of the smart-case option.
///
/// Ideally we would be able to check the AST in order to correctly handle
/// things like '\p{Ll}' and '\p{Lu}' (which should be treated as explicitly
/// cased), but PCRE doesn't expose enough details for that kind of analysis.
/// For now, our 'good enough' solution is to simply perform a semi-naïve
/// scan of the input pattern and ignore all characters following a '\'.
/// This at least lets us support the most common cases, like 'foo\w' and
/// 'foo\S', in an intuitive manner.
fn has_uppercase_literal(pattern: &str) -> bool {
let mut chars = pattern.chars();
while let Some(c) = chars.next() {
if c == '\\' {
chars.next();
} else if c.is_uppercase() {
return true;
}
}
false
}
#[cfg(test)]
mod tests {
use super::*;
use grep_matcher::{LineMatchKind, Matcher};
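// Check the raw literal scan that drives smart case. This test is a small
// sketch added for illustration; the cases mirror the doc comment on
// `has_uppercase_literal`.
#[test]
fn smart_case_literal_scan() {
// `foo\w` has no uppercase literal: the `w` follows a backslash and is
// skipped by the scan.
assert!(!has_uppercase_literal(r"foo\w"));
// `Foo\w` starts with an uppercase literal.
assert!(has_uppercase_literal(r"Foo\w"));
}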
// Test that enabling word matches does the right thing and demonstrate
// the difference between it and surrounding the regex in `\b`.
#[test]
fn word() {
let matcher =
RegexMatcherBuilder::new().word(true).build(r"-2").unwrap();
assert!(matcher.is_match(b"abc -2 foo").unwrap());
let matcher =
RegexMatcherBuilder::new().word(false).build(r"\b-2\b").unwrap();
assert!(!matcher.is_match(b"abc -2 foo").unwrap());
}
// Test that enabling CRLF permits `$` to match at the end of a line.
#[test]
fn line_terminator_crlf() {
// Test normal use of `$` with a `\n` line terminator.
let matcher = RegexMatcherBuilder::new()
.multi_line(true)
.build(r"abc$")
.unwrap();
assert!(matcher.is_match(b"abc\n").unwrap());
// Test that `$` doesn't match at `\r\n` boundary normally.
let matcher = RegexMatcherBuilder::new()
.multi_line(true)
.build(r"abc$")
.unwrap();
assert!(!matcher.is_match(b"abc\r\n").unwrap());
// Now check the CRLF handling.
let matcher = RegexMatcherBuilder::new()
.multi_line(true)
.crlf(true)
.build(r"abc$")
.unwrap();
assert!(matcher.is_match(b"abc\r\n").unwrap());
}
// Test that smart case works.
#[test]
fn case_smart() {
let matcher =
RegexMatcherBuilder::new().case_smart(true).build(r"abc").unwrap();
assert!(matcher.is_match(b"ABC").unwrap());
let matcher =
RegexMatcherBuilder::new().case_smart(true).build(r"aBc").unwrap();
assert!(!matcher.is_match(b"ABC").unwrap());
}
// Test that finding candidate lines works as expected.
#[test]
fn candidate_lines() {
fn is_confirmed(m: LineMatchKind) -> bool {
match m {
LineMatchKind::Confirmed(_) => true,
_ => false,
}
}
let matcher = RegexMatcherBuilder::new().build(r"\wfoo\s").unwrap();
let m = matcher.find_candidate_line(b"afoo ").unwrap().unwrap();
assert!(is_confirmed(m));
}
}

31
crates/printer/Cargo.toml Normal file
View File

@@ -0,0 +1,31 @@
[package]
name = "grep-printer"
version = "0.1.3" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
An implementation of the grep crate's Sink trait that provides standard
printing of search results, similar to grep itself.
"""
documentation = "https://docs.rs/grep-printer"
homepage = "https://github.com/BurntSushi/ripgrep"
repository = "https://github.com/BurntSushi/ripgrep"
readme = "README.md"
keywords = ["grep", "pattern", "print", "printer", "sink"]
license = "Unlicense/MIT"
[features]
default = ["serde1"]
serde1 = ["base64", "serde", "serde_derive", "serde_json"]
[dependencies]
base64 = { version = "0.11.0", optional = true }
bstr = "0.2.0"
grep-matcher = { version = "0.1.2", path = "../grep-matcher" }
grep-searcher = { version = "0.1.4", path = "../grep-searcher" }
termcolor = "1.0.4"
serde = { version = "1.0.77", optional = true }
serde_derive = { version = "1.0.77", optional = true }
serde_json = { version = "1.0.27", optional = true }
[dev-dependencies]
grep-regex = { version = "0.1.3", path = "../grep-regex" }

21
crates/printer/LICENSE-MIT Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Andrew Gallant
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

35
crates/printer/README.md Normal file
View File

@@ -0,0 +1,35 @@
grep-printer
------------
Print results from line oriented searching in a human readable, aggregate or
JSON Lines format.
[![Linux build status](https://api.travis-ci.org/BurntSushi/ripgrep.svg)](https://travis-ci.org/BurntSushi/ripgrep)
[![Windows build status](https://ci.appveyor.com/api/projects/status/github/BurntSushi/ripgrep?svg=true)](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[![](https://img.shields.io/crates/v/grep-printer.svg)](https://crates.io/crates/grep-printer)
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
### Documentation
[https://docs.rs/grep-printer](https://docs.rs/grep-printer)
**NOTE:** You probably don't want to use this crate directly. Instead, you
should prefer the facade defined in the
[`grep`](https://docs.rs/grep)
crate.
### Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
grep-printer = "0.1"
```
and this to your crate root:
```rust
extern crate grep_printer;
```

24
crates/printer/UNLICENSE Normal file
View File

@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>

393
crates/printer/src/color.rs Normal file
View File

@@ -0,0 +1,393 @@
use std::error;
use std::fmt;
use std::str::FromStr;
use termcolor::{Color, ColorSpec, ParseColorError};
/// Returns a default set of color specifications.
///
/// This may change over time, but the color choices are meant to be fairly
/// conservative choices that work across terminal themes.
///
/// Additional color specifications can be added to the list returned. More
/// recently added specifications override previously added specifications.
pub fn default_color_specs() -> Vec<UserColorSpec> {
vec![
#[cfg(unix)]
"path:fg:magenta".parse().unwrap(),
#[cfg(windows)]
"path:fg:cyan".parse().unwrap(),
"line:fg:green".parse().unwrap(),
"match:fg:red".parse().unwrap(),
"match:style:bold".parse().unwrap(),
]
}
/// An error that can occur when parsing color specifications.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum ColorError {
/// This occurs when an unrecognized output type is used.
UnrecognizedOutType(String),
/// This occurs when an unrecognized spec type is used.
UnrecognizedSpecType(String),
/// This occurs when an unrecognized color name is used.
UnrecognizedColor(String, String),
/// This occurs when an unrecognized style attribute is used.
UnrecognizedStyle(String),
/// This occurs when the format of a color specification is invalid.
InvalidFormat(String),
}
impl error::Error for ColorError {
fn description(&self) -> &str {
match *self {
ColorError::UnrecognizedOutType(_) => "unrecognized output type",
ColorError::UnrecognizedSpecType(_) => "unrecognized spec type",
ColorError::UnrecognizedColor(_, _) => "unrecognized color name",
ColorError::UnrecognizedStyle(_) => "unrecognized style attribute",
ColorError::InvalidFormat(_) => "invalid color spec",
}
}
}
impl ColorError {
fn from_parse_error(err: ParseColorError) -> ColorError {
ColorError::UnrecognizedColor(
err.invalid().to_string(),
err.to_string(),
)
}
}
impl fmt::Display for ColorError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ColorError::UnrecognizedOutType(ref name) => write!(
f,
"unrecognized output type '{}'. Choose from: \
path, line, column, match.",
name,
),
ColorError::UnrecognizedSpecType(ref name) => write!(
f,
"unrecognized spec type '{}'. Choose from: \
fg, bg, style, none.",
name,
),
ColorError::UnrecognizedColor(_, ref msg) => write!(f, "{}", msg),
ColorError::UnrecognizedStyle(ref name) => write!(
f,
"unrecognized style attribute '{}'. Choose from: \
nobold, bold, nointense, intense, nounderline, \
underline.",
name,
),
ColorError::InvalidFormat(ref original) => write!(
f,
"invalid color spec format: '{}'. Valid format \
is '(path|line|column|match):(fg|bg|style):(value)'.",
original,
),
}
}
}
/// A merged set of color specifications.
///
/// This set of color specifications represents the various color types that
/// are supported by the printers in this crate. A set of color specifications
/// can be created from a sequence of
/// [`UserColorSpec`s](struct.UserColorSpec.html).
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct ColorSpecs {
path: ColorSpec,
line: ColorSpec,
column: ColorSpec,
matched: ColorSpec,
}
/// A single color specification provided by the user.
///
/// ## Format
///
/// The format of a `Spec` is a triple: `{type}:{attribute}:{value}`. Each
/// component is defined as follows:
///
/// * `{type}` can be one of `path`, `line`, `column` or `match`.
/// * `{attribute}` can be one of `fg`, `bg` or `style`. `{attribute}` may also
/// be the special value `none`, in which case, `{value}` can be omitted.
/// * `{value}` is either a color name (for `fg`/`bg`) or a style instruction.
///
/// `{type}` controls which part of the output should be styled.
///
/// When `{attribute}` is `none`, then this should cause any existing style
/// settings to be cleared for the specified `type`.
///
/// `{value}` should be a color when `{attribute}` is `fg` or `bg`, or it
/// should be a style instruction when `{attribute}` is `style`. When
/// `{attribute}` is `none`, `{value}` must be omitted.
///
/// Valid colors are `black`, `blue`, `green`, `red`, `cyan`, `magenta`,
/// `yellow`, `white`. Extended colors can also be specified, and are formatted
/// as `x` (for the 256-color palette) or `x,x,x` (for 24-bit true color),
/// where `x` is a number between 0 and 255 inclusive. `x` may be given as a
/// normal decimal number or a hexadecimal number, where the latter is prefixed by
/// `0x`.
///
/// Valid style instructions are `nobold`, `bold`, `intense`, `nointense`,
/// `underline`, `nounderline`.
///
/// ## Example
///
/// The standard way to build a `UserColorSpec` is to parse it from a string.
/// Once multiple `UserColorSpec`s have been constructed, they can be provided
/// to the standard printer where they will automatically be applied to the
/// output.
///
/// A `UserColorSpec` can also be converted to a `termcolor::ColorSpec`:
///
/// ```rust
/// extern crate grep_printer;
/// extern crate termcolor;
///
/// # fn main() {
/// use termcolor::{Color, ColorSpec};
/// use grep_printer::UserColorSpec;
///
/// let user_spec1: UserColorSpec = "path:fg:blue".parse().unwrap();
/// let user_spec2: UserColorSpec = "match:bg:0xff,0x7f,0x00".parse().unwrap();
///
/// let spec1 = user_spec1.to_color_spec();
/// let spec2 = user_spec2.to_color_spec();
///
/// assert_eq!(spec1.fg(), Some(&Color::Blue));
/// assert_eq!(spec2.bg(), Some(&Color::Rgb(0xFF, 0x7F, 0x00)));
/// # }
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct UserColorSpec {
ty: OutType,
value: SpecValue,
}
impl UserColorSpec {
/// Convert this user provided color specification to a specification that
/// can be used with `termcolor`. This drops the type of this specification
/// (where the type indicates where the color is applied in the standard
/// printer, e.g., to the file path or the line numbers, etc.).
pub fn to_color_spec(&self) -> ColorSpec {
let mut spec = ColorSpec::default();
self.value.merge_into(&mut spec);
spec
}
}
/// The actual value given by the specification.
#[derive(Clone, Debug, Eq, PartialEq)]
enum SpecValue {
None,
Fg(Color),
Bg(Color),
Style(Style),
}
/// The set of configurable portions of ripgrep's output.
#[derive(Clone, Debug, Eq, PartialEq)]
enum OutType {
Path,
Line,
Column,
Match,
}
/// The specification type.
#[derive(Clone, Debug, Eq, PartialEq)]
enum SpecType {
Fg,
Bg,
Style,
None,
}
/// The set of available styles for use in the terminal.
#[derive(Clone, Debug, Eq, PartialEq)]
enum Style {
Bold,
NoBold,
Intense,
NoIntense,
Underline,
NoUnderline,
}
impl ColorSpecs {
/// Create color specifications from a list of user supplied
/// specifications.
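///
/// A minimal sketch (the spec strings use the format documented on
/// `UserColorSpec`):
///
/// ```ignore
/// let user_specs: Vec<UserColorSpec> = vec![
///     "path:fg:blue".parse().unwrap(),
///     "match:style:bold".parse().unwrap(),
/// ];
/// let specs = ColorSpecs::new(&user_specs);
/// assert!(specs.matched().bold());
/// ```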
pub fn new(specs: &[UserColorSpec]) -> ColorSpecs {
let mut merged = ColorSpecs::default();
for spec in specs {
match spec.ty {
OutType::Path => spec.merge_into(&mut merged.path),
OutType::Line => spec.merge_into(&mut merged.line),
OutType::Column => spec.merge_into(&mut merged.column),
OutType::Match => spec.merge_into(&mut merged.matched),
}
}
merged
}
/// Create a default set of specifications that have color.
///
/// This is distinct from `ColorSpecs`'s `Default` implementation in that
/// this provides a set of default color choices, whereas the `Default`
/// implementation provides no color choices.
pub fn default_with_color() -> ColorSpecs {
ColorSpecs::new(&default_color_specs())
}
/// Return the color specification for coloring file paths.
pub fn path(&self) -> &ColorSpec {
&self.path
}
/// Return the color specification for coloring line numbers.
pub fn line(&self) -> &ColorSpec {
&self.line
}
/// Return the color specification for coloring column numbers.
pub fn column(&self) -> &ColorSpec {
&self.column
}
/// Return the color specification for coloring matched text.
pub fn matched(&self) -> &ColorSpec {
&self.matched
}
}
impl UserColorSpec {
/// Merge this spec into the given color specification.
fn merge_into(&self, cspec: &mut ColorSpec) {
self.value.merge_into(cspec);
}
}
impl SpecValue {
/// Merge this spec value into the given color specification.
fn merge_into(&self, cspec: &mut ColorSpec) {
match *self {
SpecValue::None => cspec.clear(),
SpecValue::Fg(ref color) => {
cspec.set_fg(Some(color.clone()));
}
SpecValue::Bg(ref color) => {
cspec.set_bg(Some(color.clone()));
}
SpecValue::Style(ref style) => match *style {
Style::Bold => {
cspec.set_bold(true);
}
Style::NoBold => {
cspec.set_bold(false);
}
Style::Intense => {
cspec.set_intense(true);
}
Style::NoIntense => {
cspec.set_intense(false);
}
Style::Underline => {
cspec.set_underline(true);
}
Style::NoUnderline => {
cspec.set_underline(false);
}
},
}
}
}
impl FromStr for UserColorSpec {
type Err = ColorError;
fn from_str(s: &str) -> Result<UserColorSpec, ColorError> {
let pieces: Vec<&str> = s.split(':').collect();
if pieces.len() <= 1 || pieces.len() > 3 {
return Err(ColorError::InvalidFormat(s.to_string()));
}
let otype: OutType = pieces[0].parse()?;
match pieces[1].parse()? {
SpecType::None => {
Ok(UserColorSpec { ty: otype, value: SpecValue::None })
}
SpecType::Style => {
if pieces.len() < 3 {
return Err(ColorError::InvalidFormat(s.to_string()));
}
let style: Style = pieces[2].parse()?;
Ok(UserColorSpec { ty: otype, value: SpecValue::Style(style) })
}
SpecType::Fg => {
if pieces.len() < 3 {
return Err(ColorError::InvalidFormat(s.to_string()));
}
let color: Color =
pieces[2].parse().map_err(ColorError::from_parse_error)?;
Ok(UserColorSpec { ty: otype, value: SpecValue::Fg(color) })
}
SpecType::Bg => {
if pieces.len() < 3 {
return Err(ColorError::InvalidFormat(s.to_string()));
}
let color: Color =
pieces[2].parse().map_err(ColorError::from_parse_error)?;
Ok(UserColorSpec { ty: otype, value: SpecValue::Bg(color) })
}
}
}
}
impl FromStr for OutType {
type Err = ColorError;
fn from_str(s: &str) -> Result<OutType, ColorError> {
match &*s.to_lowercase() {
"path" => Ok(OutType::Path),
"line" => Ok(OutType::Line),
"column" => Ok(OutType::Column),
"match" => Ok(OutType::Match),
_ => Err(ColorError::UnrecognizedOutType(s.to_string())),
}
}
}
impl FromStr for SpecType {
type Err = ColorError;
fn from_str(s: &str) -> Result<SpecType, ColorError> {
match &*s.to_lowercase() {
"fg" => Ok(SpecType::Fg),
"bg" => Ok(SpecType::Bg),
"style" => Ok(SpecType::Style),
"none" => Ok(SpecType::None),
_ => Err(ColorError::UnrecognizedSpecType(s.to_string())),
}
}
}
impl FromStr for Style {
type Err = ColorError;
fn from_str(s: &str) -> Result<Style, ColorError> {
match &*s.to_lowercase() {
"bold" => Ok(Style::Bold),
"nobold" => Ok(Style::NoBold),
"intense" => Ok(Style::Intense),
"nointense" => Ok(Style::NoIntense),
"underline" => Ok(Style::Underline),
"nounderline" => Ok(Style::NoUnderline),
_ => Err(ColorError::UnrecognizedStyle(s.to_string())),
}
}
}

90
crates/printer/src/counter.rs Normal file
View File

@@ -0,0 +1,90 @@
use std::io::{self, Write};
use termcolor::{ColorSpec, WriteColor};
/// A writer that counts the number of bytes that have been successfully
/// written.
#[derive(Clone, Debug)]
pub struct CounterWriter<W> {
wtr: W,
count: u64,
total_count: u64,
}
impl<W: Write> CounterWriter<W> {
pub fn new(wtr: W) -> CounterWriter<W> {
CounterWriter { wtr: wtr, count: 0, total_count: 0 }
}
}
impl<W> CounterWriter<W> {
/// Returns the total number of bytes written since construction or the
/// last time `reset_count` was called.
pub fn count(&self) -> u64 {
self.count
}
/// Returns the total number of bytes written since construction.
pub fn total_count(&self) -> u64 {
self.total_count + self.count
}
/// Resets the number of bytes written to `0`.
pub fn reset_count(&mut self) {
self.total_count += self.count;
self.count = 0;
}
/// Resets all counting related state for this writer.
///
/// After this call, the total count of bytes written to the underlying
/// writer is erased and reset.
#[allow(dead_code)]
pub fn clear(&mut self) {
self.count = 0;
self.total_count = 0;
}
#[allow(dead_code)]
pub fn get_ref(&self) -> &W {
&self.wtr
}
pub fn get_mut(&mut self) -> &mut W {
&mut self.wtr
}
pub fn into_inner(self) -> W {
self.wtr
}
}
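// A short sketch of the counting behavior (hypothetical usage):
//
// use std::io::Write;
// let mut wtr = CounterWriter::new(vec![]);
// wtr.write_all(b"abc").unwrap();
// assert_eq!(wtr.count(), 3);
// wtr.reset_count();
// assert_eq!((wtr.count(), wtr.total_count()), (0, 3));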
impl<W: Write> Write for CounterWriter<W> {
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
let n = self.wtr.write(buf)?;
self.count += n as u64;
Ok(n)
}
fn flush(&mut self) -> Result<(), io::Error> {
self.wtr.flush()
}
}
impl<W: WriteColor> WriteColor for CounterWriter<W> {
fn supports_color(&self) -> bool {
self.wtr.supports_color()
}
fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
self.wtr.set_color(spec)
}
fn reset(&mut self) -> io::Result<()> {
self.wtr.reset()
}
fn is_synchronous(&self) -> bool {
self.wtr.is_synchronous()
}
}

936
crates/printer/src/json.rs Normal file
View File

@@ -0,0 +1,936 @@
use std::io::{self, Write};
use std::path::Path;
use std::time::Instant;
use grep_matcher::{Match, Matcher};
use grep_searcher::{
Searcher, Sink, SinkContext, SinkContextKind, SinkError, SinkFinish,
SinkMatch,
};
use serde_json as json;
use counter::CounterWriter;
use jsont;
use stats::Stats;
/// The configuration for the JSON printer.
///
/// This is manipulated by the JSONBuilder and then referenced by the actual
/// implementation. Once a printer is built, the configuration is frozen and
/// cannot be changed.
#[derive(Debug, Clone)]
struct Config {
pretty: bool,
max_matches: Option<u64>,
always_begin_end: bool,
}
impl Default for Config {
fn default() -> Config {
Config { pretty: false, max_matches: None, always_begin_end: false }
}
}
/// A builder for a JSON lines printer.
///
/// The builder permits configuring how the printer behaves. The JSON printer
/// has fewer configuration options than the standard printer because it is
/// a structured format, and the printer always attempts to emit as much
/// information as possible.
///
/// Some configuration options, such as whether line numbers are included or
/// whether contextual lines are shown, are drawn directly from the
/// `grep_searcher::Searcher`'s configuration.
///
/// Once a `JSON` printer is built, its configuration cannot be changed.
#[derive(Clone, Debug)]
pub struct JSONBuilder {
config: Config,
}
impl JSONBuilder {
/// Return a new builder for configuring the JSON printer.
pub fn new() -> JSONBuilder {
JSONBuilder { config: Config::default() }
}
/// Create a JSON printer that writes results to the given writer.
pub fn build<W: io::Write>(&self, wtr: W) -> JSON<W> {
JSON {
config: self.config.clone(),
wtr: CounterWriter::new(wtr),
matches: vec![],
}
}
/// Print JSON in a pretty printed format.
///
/// Enabling this will no longer produce a "JSON lines" format, in that
/// each JSON object printed may span multiple lines.
///
/// This is disabled by default.
pub fn pretty(&mut self, yes: bool) -> &mut JSONBuilder {
self.config.pretty = yes;
self
}
/// Set the maximum number of matches that are printed.
///
/// If multi line search is enabled and a match spans multiple lines, then
/// that match is counted exactly once for the purposes of enforcing this
/// limit, regardless of how many lines it spans.
pub fn max_matches(&mut self, limit: Option<u64>) -> &mut JSONBuilder {
self.config.max_matches = limit;
self
}
/// When enabled, the `begin` and `end` messages are always emitted, even
/// when no match is found.
///
/// When disabled, the `begin` and `end` messages are only shown if there
/// is at least one `match` or `context` message.
///
/// This is disabled by default.
pub fn always_begin_end(&mut self, yes: bool) -> &mut JSONBuilder {
self.config.always_begin_end = yes;
self
}
}
/// The JSON printer, which emits results in a JSON lines format.
///
/// This type is generic over `W`, which represents any implementation of
/// the standard library `io::Write` trait.
///
/// # Format
///
/// This section describes the JSON format used by this printer.
///
/// To skip the rigamarole, take a look at the
/// [example](#example)
/// at the end.
///
/// ## Overview
///
/// The format of this printer is the [JSON Lines](http://jsonlines.org/)
/// format. Specifically, this printer emits a sequence of messages, where
/// each message is encoded as a single JSON value on a single line. There are
/// four different types of messages (and this number may expand over time):
///
/// * **begin** - A message that indicates a file is being searched.
/// * **end** - A message that indicates a file is done being searched. This
///   message also includes summary statistics about the search.
/// * **match** - A message that indicates a match was found. This includes
/// the text and offsets of the match.
/// * **context** - A message that indicates a contextual line was found.
/// This includes the text of the line, along with any match information if
/// the search was inverted.
///
/// Every message is encoded in the same envelope format, which includes a tag
/// indicating the message type along with an object for the payload:
///
/// ```json
/// {
/// "type": "{begin|end|match|context}",
/// "data": { ... }
/// }
/// ```
///
/// The message itself is encoded in the envelope's `data` key.
///
/// ## Text encoding
///
/// Before describing each message format, we first must briefly discuss text
/// encoding, since it factors into every type of message. In particular, JSON
/// may only be encoded in UTF-8, UTF-16 or UTF-32. For the purposes of this
/// printer, we need only worry about UTF-8. The problem here is that searching
/// is not limited to UTF-8 exclusively, which in turn implies that matches
/// may be reported that contain invalid UTF-8. Moreover, this printer may
/// also print file paths, and the encoding of file paths is itself not
/// guaranteed to be valid UTF-8. Therefore, this printer must deal with the
/// presence of invalid UTF-8 somehow. The printer could silently ignore such
/// things completely, or even lossily transcode invalid UTF-8 to valid UTF-8
/// by replacing all invalid sequences with the Unicode replacement character.
/// However, this would prevent consumers of this format from accessing the
/// original data in a non-lossy way.
///
/// Therefore, this printer will emit valid UTF-8 encoded bytes as normal
/// JSON strings and otherwise base64 encode data that isn't valid UTF-8. To
/// communicate whether this process occurs or not, strings are keyed by the
/// name `text`, whereas arbitrary bytes are keyed by `bytes`.
///
/// For example, when a path is included in a message, it is formatted like so,
/// if and only if the path is valid UTF-8:
///
/// ```json
/// {
/// "path": {
/// "text": "/home/ubuntu/lib.rs"
/// }
/// }
/// ```
///
/// If instead our path was `/home/ubuntu/lib\xFF.rs`, where the `\xFF` byte
/// makes it invalid UTF-8, the path would instead be encoded like so:
///
/// ```json
/// {
/// "path": {
/// "bytes": "L2hvbWUvdWJ1bnR1L2xpYv8ucnM="
/// }
/// }
/// ```
///
/// This same representation is used for reporting matches as well.
///
/// The printer guarantees that the `text` field is used whenever the
/// underlying bytes are valid UTF-8.
///
/// ## Wire format
///
/// This section documents the wire format emitted by this printer, starting
/// with the four types of messages.
///
/// Each message has its own format, and is contained inside an envelope that
/// indicates the type of message. The envelope has these fields:
///
/// * **type** - A string indicating the type of this message. It may be one
/// of four possible strings: `begin`, `end`, `match` or `context`. This
/// list may expand over time.
/// * **data** - The actual message data. The format of this field depends on
/// the value of `type`. The possible message formats are
/// [`begin`](#message-begin),
/// [`end`](#message-end),
/// [`match`](#message-match),
/// [`context`](#message-context).
///
/// #### Message: **begin**
///
/// This message indicates that a search has begun. It has these fields:
///
/// * **path** - An
/// [arbitrary data object](#object-arbitrary-data)
/// representing the file path corresponding to the search, if one is
/// present. If no file path is available, then this field is `null`.
///
/// #### Message: **end**
///
/// This message indicates that a search has finished. It has these fields:
///
/// * **path** - An
/// [arbitrary data object](#object-arbitrary-data)
/// representing the file path corresponding to the search, if one is
/// present. If no file path is available, then this field is `null`.
/// * **binary_offset** - The absolute offset in the data searched
/// corresponding to the place at which binary data was detected. If no
/// binary data was detected (or if binary detection was disabled), then this
/// field is `null`.
/// * **stats** - A [`stats` object](#object-stats) that contains summary
/// statistics for the previous search.
///
/// #### Message: **match**
///
/// This message indicates that a match has been found. A match generally
/// corresponds to a single line of text, although it may correspond to
/// multiple lines if the search can emit matches over multiple lines. It
/// has these fields:
///
/// * **path** - An
/// [arbitrary data object](#object-arbitrary-data)
/// representing the file path corresponding to the search, if one is
/// present. If no file path is available, then this field is `null`.
/// * **lines** - An
/// [arbitrary data object](#object-arbitrary-data)
/// representing one or more lines contained in this match.
/// * **line_number** - If the searcher has been configured to report line
/// numbers, then this corresponds to the line number of the first line
/// in `lines`. If no line numbers are available, then this is `null`.
/// * **absolute_offset** - The absolute byte offset corresponding to the start
/// of `lines` in the data being searched.
/// * **submatches** - An array of [`submatch` objects](#object-submatch)
/// corresponding to matches in `lines`. The offsets included in each
/// `submatch` correspond to byte offsets into `lines`. (If `lines` is base64
/// encoded, then the byte offsets correspond to the data after base64
/// decoding.) The `submatch` objects are guaranteed to be sorted by their
/// starting offsets. Note that it is possible for this array to be empty,
/// for example, when searching reports inverted matches.
///
/// #### Message: **context**
///
/// This message indicates that a contextual line has been found. A contextual
/// line is a line that doesn't contain a match, but is generally adjacent to
/// a line that does contain a match. The precise way in which contextual lines
/// are reported is determined by the searcher. It has these fields, which are
/// exactly the same fields found in a [`match`](#message-match):
///
/// * **path** - An
/// [arbitrary data object](#object-arbitrary-data)
/// representing the file path corresponding to the search, if one is
/// present. If no file path is available, then this field is `null`.
/// * **lines** - An
/// [arbitrary data object](#object-arbitrary-data)
/// representing one or more lines contained in this context. This includes
/// line terminators, if they're present.
/// * **line_number** - If the searcher has been configured to report line
/// numbers, then this corresponds to the line number of the first line
/// in `lines`. If no line numbers are available, then this is `null`.
/// * **absolute_offset** - The absolute byte offset corresponding to the start
/// of `lines` in the data being searched.
/// * **submatches** - An array of [`submatch` objects](#object-submatch)
/// corresponding to matches in `lines`. The offsets included in each
/// `submatch` correspond to byte offsets into `lines`. (If `lines` is base64
/// encoded, then the byte offsets correspond to the data after base64
/// decoding.) The `submatch` objects are guaranteed to be sorted by
/// their starting offsets. Note that it is possible for this array to be
/// non-empty, for example, when searching reports inverted matches such that
/// the original matcher could match things in the contextual lines.
///
/// #### Object: **submatch**
///
/// This object describes submatches found within `match` or `context`
/// messages. The `start` and `end` fields indicate the half-open interval on
/// which the match occurs (`start` is included, but `end` is not). It is
/// guaranteed that `start <= end`. It has these fields:
///
/// * **match** - An
/// [arbitrary data object](#object-arbitrary-data)
/// corresponding to the text in this submatch.
/// * **start** - A byte offset indicating the start of this match. This offset
/// is generally reported in terms of the parent object's data. For example,
/// the `lines` field in the
/// [`match`](#message-match) or [`context`](#message-context)
/// messages.
/// * **end** - A byte offset indicating the end of this match. This offset
/// is generally reported in terms of the parent object's data. For example,
/// the `lines` field in the
/// [`match`](#message-match) or [`context`](#message-context)
/// messages.
///
/// #### Object: **stats**
///
/// This object is included in messages and contains summary statistics about
/// a search. It has these fields:
///
/// * **elapsed** - A [`duration` object](#object-duration) describing the
/// length of time that elapsed while performing the search.
/// * **searches** - The number of searches that have run. For this printer,
/// this value is always `1`. (Implementations may emit additional message
///   types that use this same `stats` object to represent summary
/// statistics over multiple searches.)
/// * **searches_with_match** - The number of searches that have run that have
/// found at least one match. This is never more than `searches`.
/// * **bytes_searched** - The total number of bytes that have been searched.
/// * **bytes_printed** - The total number of bytes that have been printed.
/// This includes everything emitted by this printer.
/// * **matched_lines** - The total number of lines that participated in a
/// match. When matches may contain multiple lines, then this includes every
/// line that is part of every match.
/// * **matches** - The total number of matches. There may be multiple matches
/// per line. When matches may contain multiple lines, each match is counted
/// only once, regardless of how many lines it spans.
///
/// #### Object: **duration**
///
/// This object includes a few fields for describing a duration. Two of its
/// fields, `secs` and `nanos`, can be combined to give nanosecond precision
/// on systems that support it. It has these fields:
///
/// * **secs** - A whole number of seconds indicating the length of this
/// duration.
/// * **nanos** - The fractional part of this duration, expressed in
///   nanoseconds. If nanosecond precision isn't supported, then this is
///   typically rounded up to the nearest available precision.
/// * **human** - A human readable string describing the length of the
/// duration. The format of the string is itself unspecified.
///
/// #### Object: **arbitrary data**
///
/// This object is used whenever arbitrary data needs to be represented as a
/// JSON value. This object contains two fields, where generally only one of
/// the fields is present:
///
/// * **text** - A normal JSON string that is UTF-8 encoded. This field is
/// populated if and only if the underlying data is valid UTF-8.
/// * **bytes** - A normal JSON string that is a base64 encoding of the
/// underlying bytes.
///
/// More information on the motivation for this representation can be seen in
/// the section [text encoding](#text-encoding) above.
///
/// ## Example
///
/// This section shows a small example that includes all message types.
///
/// Here's the file we want to search, located at `/home/andrew/sherlock`:
///
/// ```text
/// For the Doctor Watsons of this world, as opposed to the Sherlock
/// Holmeses, success in the province of detective work must always
/// be, to a very large extent, the result of luck. Sherlock Holmes
/// can extract a clew from a wisp of straw or a flake of cigar ash;
/// but Doctor Watson has to have it taken out for him and dusted,
/// and exhibited clearly, with a label attached.
/// ```
///
/// Searching for `Watson` with a `before_context` of `1` with line numbers
/// enabled shows something like this using the standard printer:
///
/// ```text
/// sherlock:1:For the Doctor Watsons of this world, as opposed to the Sherlock
/// --
/// sherlock-4-can extract a clew from a wisp of straw or a flake of cigar ash;
/// sherlock:5:but Doctor Watson has to have it taken out for him and dusted,
/// ```
///
/// Here's what the same search looks like using the JSON wire format described
/// above, wherein we show semi-prettified JSON (instead of a strict JSON
/// Lines format), for illustrative purposes:
///
/// ```json
/// {
/// "type": "begin",
/// "data": {
/// "path": {"text": "/home/andrew/sherlock"}}
/// }
/// }
/// {
/// "type": "match",
/// "data": {
/// "path": {"text": "/home/andrew/sherlock"},
/// "lines": {"text": "For the Doctor Watsons of this world, as opposed to the Sherlock\n"},
/// "line_number": 1,
/// "absolute_offset": 0,
/// "submatches": [
/// {"match": {"text": "Watson"}, "start": 15, "end": 21}
/// ]
/// }
/// }
/// {
/// "type": "context",
/// "data": {
/// "path": {"text": "/home/andrew/sherlock"},
/// "lines": {"text": "can extract a clew from a wisp of straw or a flake of cigar ash;\n"},
/// "line_number": 4,
/// "absolute_offset": 193,
/// "submatches": []
/// }
/// }
/// {
/// "type": "match",
/// "data": {
/// "path": {"text": "/home/andrew/sherlock"},
/// "lines": {"text": "but Doctor Watson has to have it taken out for him and dusted,\n"},
/// "line_number": 5,
/// "absolute_offset": 258,
/// "submatches": [
/// {"match": {"text": "Watson"}, "start": 11, "end": 17}
/// ]
/// }
/// }
/// {
/// "type": "end",
/// "data": {
/// "path": {"text": "/home/andrew/sherlock"},
/// "binary_offset": null,
/// "stats": {
/// "elapsed": {"secs": 0, "nanos": 36296, "human": "0.0000s"},
/// "searches": 1,
/// "searches_with_match": 1,
/// "bytes_searched": 367,
/// "bytes_printed": 1151,
/// "matched_lines": 2,
/// "matches": 2
/// }
/// }
/// }
/// ```
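///
/// # Example
///
/// A minimal usage sketch (not part of the original documentation). It
/// assumes the `grep-regex` and `grep-searcher` crates used elsewhere in
/// this crate's examples:
///
/// ```ignore
/// use grep_regex::RegexMatcher;
/// use grep_searcher::Searcher;
///
/// let matcher = RegexMatcher::new(r"Watson")?;
/// let mut printer = JSON::new(vec![]);
/// let haystack = b"but Doctor Watson has to have it taken out for him";
/// Searcher::new().search_slice(&matcher, haystack, printer.sink(&matcher))?;
/// let json_lines = String::from_utf8(printer.into_inner())?;
/// ```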
#[derive(Debug)]
pub struct JSON<W> {
config: Config,
wtr: CounterWriter<W>,
matches: Vec<Match>,
}
impl<W: io::Write> JSON<W> {
/// Return a JSON lines printer with a default configuration that writes
/// matches to the given writer.
pub fn new(wtr: W) -> JSON<W> {
JSONBuilder::new().build(wtr)
}
/// Return an implementation of `Sink` for the JSON printer.
///
/// This does not associate the printer with a file path, which means this
/// implementation will never print a file path along with the matches.
pub fn sink<'s, M: Matcher>(
&'s mut self,
matcher: M,
) -> JSONSink<'static, 's, M, W> {
JSONSink {
matcher: matcher,
json: self,
path: None,
start_time: Instant::now(),
match_count: 0,
after_context_remaining: 0,
binary_byte_offset: None,
begin_printed: false,
stats: Stats::new(),
}
}
/// Return an implementation of `Sink` associated with a file path.
///
/// When the printer is associated with a path, then it may, depending on
/// its configuration, print the path along with the matches found.
pub fn sink_with_path<'p, 's, M, P>(
&'s mut self,
matcher: M,
path: &'p P,
) -> JSONSink<'p, 's, M, W>
where
M: Matcher,
P: ?Sized + AsRef<Path>,
{
JSONSink {
matcher: matcher,
json: self,
path: Some(path.as_ref()),
start_time: Instant::now(),
match_count: 0,
after_context_remaining: 0,
binary_byte_offset: None,
begin_printed: false,
stats: Stats::new(),
}
}
/// Write the given message followed by a new line. The new line is
/// always `\n`, independent of the searcher's configured line terminator.
fn write_message(&mut self, message: &jsont::Message) -> io::Result<()> {
if self.config.pretty {
json::to_writer_pretty(&mut self.wtr, message)?;
} else {
json::to_writer(&mut self.wtr, message)?;
}
self.wtr.write(&[b'\n'])?;
Ok(())
}
}
impl<W> JSON<W> {
/// Returns true if and only if this printer has written at least one byte
/// to the underlying writer during any of the previous searches.
pub fn has_written(&self) -> bool {
self.wtr.total_count() > 0
}
/// Return a mutable reference to the underlying writer.
pub fn get_mut(&mut self) -> &mut W {
self.wtr.get_mut()
}
/// Consume this printer and return back ownership of the underlying
/// writer.
pub fn into_inner(self) -> W {
self.wtr.into_inner()
}
}
/// An implementation of `Sink` associated with a matcher and an optional file
/// path for the JSON printer.
///
/// This type is generic over a few type parameters:
///
/// * `'p` refers to the lifetime of the file path, if one is provided. When
/// no file path is given, then this is `'static`.
/// * `'s` refers to the lifetime of the
/// [`JSON`](struct.JSON.html)
/// printer that this type borrows.
/// * `M` refers to the type of matcher used by
/// `grep_searcher::Searcher` that is reporting results to this sink.
/// * `W` refers to the underlying writer that this printer is writing its
/// output to.
#[derive(Debug)]
pub struct JSONSink<'p, 's, M: Matcher, W: 's> {
matcher: M,
json: &'s mut JSON<W>,
path: Option<&'p Path>,
start_time: Instant,
match_count: u64,
after_context_remaining: u64,
binary_byte_offset: Option<u64>,
begin_printed: bool,
stats: Stats,
}
impl<'p, 's, M: Matcher, W: io::Write> JSONSink<'p, 's, M, W> {
/// Returns true if and only if this printer received a match in the
/// previous search.
///
/// This is unaffected by the result of searches before the previous
/// search.
pub fn has_match(&self) -> bool {
self.match_count > 0
}
/// Return the total number of matches reported to this sink.
///
/// This corresponds to the number of times `Sink::matched` is called.
pub fn match_count(&self) -> u64 {
self.match_count
}
/// If binary data was found in the previous search, this returns the
/// offset at which the binary data was first detected.
///
/// The offset returned is an absolute offset relative to the entire
/// set of bytes searched.
///
/// This is unaffected by the result of searches before the previous
/// search. For example, if the search prior to the previous search found binary
/// data but the previous search found no binary data, then this will
/// return `None`.
pub fn binary_byte_offset(&self) -> Option<u64> {
self.binary_byte_offset
}
/// Return a reference to the stats produced by the printer for all
/// searches executed on this sink.
pub fn stats(&self) -> &Stats {
&self.stats
}
/// Execute the matcher over the given bytes and record the match
/// locations if the current configuration demands match granularity.
fn record_matches(&mut self, bytes: &[u8]) -> io::Result<()> {
self.json.matches.clear();
// If printing requires knowing the location of each individual match,
// then compute and store those right now for use later. While this
// adds an extra copy for storing the matches, we do amortize the
// allocation for it and this greatly simplifies the printing logic to
// the extent that it's easy to ensure that we never do more than
// one search to find the matches.
let matches = &mut self.json.matches;
self.matcher
.find_iter(bytes, |m| {
matches.push(m);
true
})
.map_err(io::Error::error_message)?;
// Don't report empty matches appearing at the end of the bytes.
if !matches.is_empty()
&& matches.last().unwrap().is_empty()
&& matches.last().unwrap().start() >= bytes.len()
{
matches.pop().unwrap();
}
Ok(())
}
/// Returns true if this printer should quit.
///
/// This implements the logic for handling quitting after seeing a certain
/// amount of matches. In most cases, the logic is simple, but we must
/// permit all "after" contextual lines to print after reaching the limit.
fn should_quit(&self) -> bool {
let limit = match self.json.config.max_matches {
None => return false,
Some(limit) => limit,
};
if self.match_count < limit {
return false;
}
self.after_context_remaining == 0
}
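// Illustrative note (not part of the original file): with
// `max_matches == Some(1)` and an after-context of 2, the first match
// sets `after_context_remaining = 2`, so `should_quit` keeps returning
// false until both trailing context lines have been printed.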
/// Write the "begin" message.
fn write_begin_message(&mut self) -> io::Result<()> {
if self.begin_printed {
return Ok(());
}
let msg = jsont::Message::Begin(jsont::Begin { path: self.path });
self.json.write_message(&msg)?;
self.begin_printed = true;
Ok(())
}
}
impl<'p, 's, M: Matcher, W: io::Write> Sink for JSONSink<'p, 's, M, W> {
type Error = io::Error;
fn matched(
&mut self,
searcher: &Searcher,
mat: &SinkMatch,
) -> Result<bool, io::Error> {
self.write_begin_message()?;
self.match_count += 1;
self.after_context_remaining = searcher.after_context() as u64;
self.record_matches(mat.bytes())?;
self.stats.add_matches(self.json.matches.len() as u64);
self.stats.add_matched_lines(mat.lines().count() as u64);
let submatches = SubMatches::new(mat.bytes(), &self.json.matches);
let msg = jsont::Message::Match(jsont::Match {
path: self.path,
lines: mat.bytes(),
line_number: mat.line_number(),
absolute_offset: mat.absolute_byte_offset(),
submatches: submatches.as_slice(),
});
self.json.write_message(&msg)?;
Ok(!self.should_quit())
}
fn context(
&mut self,
searcher: &Searcher,
ctx: &SinkContext,
) -> Result<bool, io::Error> {
self.write_begin_message()?;
self.json.matches.clear();
if ctx.kind() == &SinkContextKind::After {
self.after_context_remaining =
self.after_context_remaining.saturating_sub(1);
}
let submatches = if searcher.invert_match() {
self.record_matches(ctx.bytes())?;
SubMatches::new(ctx.bytes(), &self.json.matches)
} else {
SubMatches::empty()
};
let msg = jsont::Message::Context(jsont::Context {
path: self.path,
lines: ctx.bytes(),
line_number: ctx.line_number(),
absolute_offset: ctx.absolute_byte_offset(),
submatches: submatches.as_slice(),
});
self.json.write_message(&msg)?;
Ok(!self.should_quit())
}
fn begin(&mut self, _searcher: &Searcher) -> Result<bool, io::Error> {
self.json.wtr.reset_count();
self.start_time = Instant::now();
self.match_count = 0;
self.after_context_remaining = 0;
self.binary_byte_offset = None;
if self.json.config.max_matches == Some(0) {
return Ok(false);
}
if !self.json.config.always_begin_end {
return Ok(true);
}
self.write_begin_message()?;
Ok(true)
}
fn finish(
&mut self,
_searcher: &Searcher,
finish: &SinkFinish,
) -> Result<(), io::Error> {
if !self.begin_printed {
return Ok(());
}
self.binary_byte_offset = finish.binary_byte_offset();
self.stats.add_elapsed(self.start_time.elapsed());
self.stats.add_searches(1);
if self.match_count > 0 {
self.stats.add_searches_with_match(1);
}
self.stats.add_bytes_searched(finish.byte_count());
self.stats.add_bytes_printed(self.json.wtr.count());
let msg = jsont::Message::End(jsont::End {
path: self.path,
binary_offset: finish.binary_byte_offset(),
stats: self.stats.clone(),
});
self.json.write_message(&msg)?;
Ok(())
}
}
/// SubMatches represents a set of matches in a contiguous range of bytes.
///
/// A simpler representation for this would simply be `Vec<SubMatch>`,
/// but the common case is exactly one match per range of bytes, which we
/// specialize here using a fixed size array without any allocation.
enum SubMatches<'a> {
Empty,
Small([jsont::SubMatch<'a>; 1]),
Big(Vec<jsont::SubMatch<'a>>),
}
impl<'a> SubMatches<'a> {
/// Create a new set of match ranges from a set of matches and the
/// corresponding bytes that those matches apply to.
fn new(bytes: &'a [u8], matches: &[Match]) -> SubMatches<'a> {
if matches.len() == 1 {
let mat = matches[0];
SubMatches::Small([jsont::SubMatch {
m: &bytes[mat],
start: mat.start(),
end: mat.end(),
}])
} else {
let mut match_ranges = vec![];
for &mat in matches {
match_ranges.push(jsont::SubMatch {
m: &bytes[mat],
start: mat.start(),
end: mat.end(),
});
}
SubMatches::Big(match_ranges)
}
}
/// Create an empty set of match ranges.
fn empty() -> SubMatches<'static> {
SubMatches::Empty
}
/// Return this set of match ranges as a slice.
fn as_slice(&self) -> &[jsont::SubMatch] {
match *self {
SubMatches::Empty => &[],
SubMatches::Small(ref x) => x,
SubMatches::Big(ref x) => x,
}
}
}
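// Illustrative note (not part of the original file): `SubMatches::new` with
// exactly one match takes the inline `Small` arm, so `as_slice` borrows from
// a stack-allocated one-element array; any other count falls through to the
// heap-backed `Big` vector, while `empty()` yields the `Empty` arm used for
// context lines in non-inverted searches.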
#[cfg(test)]
mod tests {
use grep_matcher::LineTerminator;
use grep_regex::{RegexMatcher, RegexMatcherBuilder};
use grep_searcher::SearcherBuilder;
use super::{JSONBuilder, JSON};
const SHERLOCK: &'static [u8] = b"\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
can extract a clew from a wisp of straw or a flake of cigar ash;
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.
";
fn printer_contents(printer: &mut JSON<Vec<u8>>) -> String {
String::from_utf8(printer.get_mut().to_owned()).unwrap()
}
#[test]
fn binary_detection() {
use grep_searcher::BinaryDetection;
const BINARY: &'static [u8] = b"\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
can extract a clew \x00 from a wisp of straw or a flake of cigar ash;
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.\
";
let matcher = RegexMatcher::new(r"Watson").unwrap();
let mut printer = JSONBuilder::new().build(vec![]);
SearcherBuilder::new()
.binary_detection(BinaryDetection::quit(b'\x00'))
.heap_limit(Some(80))
.build()
.search_reader(&matcher, BINARY, printer.sink(&matcher))
.unwrap();
let got = printer_contents(&mut printer);
assert_eq!(got.lines().count(), 3);
let last = got.lines().last().unwrap();
assert!(last.contains(r#""binary_offset":212,"#));
}
#[test]
fn max_matches() {
let matcher = RegexMatcher::new(r"Watson").unwrap();
let mut printer =
JSONBuilder::new().max_matches(Some(1)).build(vec![]);
SearcherBuilder::new()
.build()
.search_reader(&matcher, SHERLOCK, printer.sink(&matcher))
.unwrap();
let got = printer_contents(&mut printer);
assert_eq!(got.lines().count(), 3);
}
#[test]
fn no_match() {
let matcher = RegexMatcher::new(r"DOES NOT MATCH").unwrap();
let mut printer = JSONBuilder::new().build(vec![]);
SearcherBuilder::new()
.build()
.search_reader(&matcher, SHERLOCK, printer.sink(&matcher))
.unwrap();
let got = printer_contents(&mut printer);
assert!(got.is_empty());
}
#[test]
fn always_begin_end_no_match() {
let matcher = RegexMatcher::new(r"DOES NOT MATCH").unwrap();
let mut printer =
JSONBuilder::new().always_begin_end(true).build(vec![]);
SearcherBuilder::new()
.build()
.search_reader(&matcher, SHERLOCK, printer.sink(&matcher))
.unwrap();
let got = printer_contents(&mut printer);
assert_eq!(got.lines().count(), 2);
assert!(got.contains("begin") && got.contains("end"));
}
#[test]
fn missing_crlf() {
let haystack = "test\r\n".as_bytes();
let matcher = RegexMatcherBuilder::new().build("test").unwrap();
let mut printer = JSONBuilder::new().build(vec![]);
SearcherBuilder::new()
.build()
.search_reader(&matcher, haystack, printer.sink(&matcher))
.unwrap();
let got = printer_contents(&mut printer);
assert_eq!(got.lines().count(), 3);
assert!(
got.lines().nth(1).unwrap().contains(r"test\r\n"),
r"missing 'test\r\n' in '{}'",
got.lines().nth(1).unwrap(),
);
let matcher =
RegexMatcherBuilder::new().crlf(true).build("test").unwrap();
let mut printer = JSONBuilder::new().build(vec![]);
SearcherBuilder::new()
.line_terminator(LineTerminator::crlf())
.build()
.search_reader(&matcher, haystack, printer.sink(&matcher))
.unwrap();
let got = printer_contents(&mut printer);
assert_eq!(got.lines().count(), 3);
assert!(
got.lines().nth(1).unwrap().contains(r"test\r\n"),
r"missing 'test\r\n' in '{}'",
got.lines().nth(1).unwrap(),
);
}
}

143
crates/printer/src/jsont.rs Normal file
View File

@@ -0,0 +1,143 @@
// This module defines the types we use for JSON serialization. We specifically
// omit deserialization, partially because there isn't a clear use case for
// it at this time, but also because deserialization will complicate things.
// Namely, the types below are designed in a way that permits JSON
// serialization with little or no allocation. Allocation is often quite
// convenient for deserialization however, so these types would become a bit
// more complex.
use std::borrow::Cow;
use std::path::Path;
use std::str;
use base64;
use serde::{Serialize, Serializer};
use stats::Stats;
#[derive(Serialize)]
#[serde(tag = "type", content = "data")]
#[serde(rename_all = "snake_case")]
pub enum Message<'a> {
Begin(Begin<'a>),
End(End<'a>),
Match(Match<'a>),
Context(Context<'a>),
}
#[derive(Serialize)]
pub struct Begin<'a> {
#[serde(serialize_with = "ser_path")]
pub path: Option<&'a Path>,
}
#[derive(Serialize)]
pub struct End<'a> {
#[serde(serialize_with = "ser_path")]
pub path: Option<&'a Path>,
pub binary_offset: Option<u64>,
pub stats: Stats,
}
#[derive(Serialize)]
pub struct Match<'a> {
#[serde(serialize_with = "ser_path")]
pub path: Option<&'a Path>,
#[serde(serialize_with = "ser_bytes")]
pub lines: &'a [u8],
pub line_number: Option<u64>,
pub absolute_offset: u64,
pub submatches: &'a [SubMatch<'a>],
}
#[derive(Serialize)]
pub struct Context<'a> {
#[serde(serialize_with = "ser_path")]
pub path: Option<&'a Path>,
#[serde(serialize_with = "ser_bytes")]
pub lines: &'a [u8],
pub line_number: Option<u64>,
pub absolute_offset: u64,
pub submatches: &'a [SubMatch<'a>],
}
#[derive(Serialize)]
pub struct SubMatch<'a> {
#[serde(rename = "match")]
#[serde(serialize_with = "ser_bytes")]
pub m: &'a [u8],
pub start: usize,
pub end: usize,
}
/// Data represents things that look like strings, but may actually not be
/// valid UTF-8. To handle this, `Data` is serialized as an object with one
/// of two keys: `text` (for valid UTF-8) or `bytes` (for invalid UTF-8).
///
/// The happy path is valid UTF-8, which streams right through as-is, since
/// it is natively supported by JSON. When invalid UTF-8 is found, then it is
/// represented as arbitrary bytes and base64 encoded.
#[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize)]
#[serde(untagged)]
enum Data<'a> {
Text {
text: Cow<'a, str>,
},
Bytes {
#[serde(serialize_with = "to_base64")]
bytes: &'a [u8],
},
}
impl<'a> Data<'a> {
fn from_bytes(bytes: &[u8]) -> Data {
match str::from_utf8(bytes) {
Ok(text) => Data::Text { text: Cow::Borrowed(text) },
Err(_) => Data::Bytes { bytes },
}
}
#[cfg(unix)]
fn from_path(path: &Path) -> Data {
use std::os::unix::ffi::OsStrExt;
match path.to_str() {
Some(text) => Data::Text { text: Cow::Borrowed(text) },
None => Data::Bytes { bytes: path.as_os_str().as_bytes() },
}
}
#[cfg(not(unix))]
fn from_path(path: &Path) -> Data {
// Using lossy conversion means some paths won't round trip precisely,
// but it's not clear what we should actually do. Serde rejects
// non-UTF-8 paths, and OsStr's are serialized as a sequence of UTF-16
// code units on Windows. Neither seem appropriate for this use case,
// so we do the easy thing for now.
Data::Text { text: path.to_string_lossy() }
}
}
fn to_base64<T, S>(bytes: T, ser: S) -> Result<S::Ok, S::Error>
where
T: AsRef<[u8]>,
S: Serializer,
{
ser.serialize_str(&base64::encode(&bytes))
}
fn ser_bytes<T, S>(bytes: T, ser: S) -> Result<S::Ok, S::Error>
where
T: AsRef<[u8]>,
S: Serializer,
{
Data::from_bytes(bytes.as_ref()).serialize(ser)
}
fn ser_path<P, S>(path: &Option<P>, ser: S) -> Result<S::Ok, S::Error>
where
P: AsRef<Path>,
S: Serializer,
{
path.as_ref().map(|p| Data::from_path(p.as_ref())).serialize(ser)
}
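// Illustrative example (not part of the original file): `ser_bytes` emits
// {"text":"abc"} for the valid UTF-8 input b"abc", but falls back to
// {"bytes":"/w=="} for the invalid UTF-8 input b"\xFF", where "/w==" is the
// base64 encoding of the single byte 0xFF.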

107
crates/printer/src/lib.rs Normal file
View File

@@ -0,0 +1,107 @@
/*!
This crate provides featureful and fast printers that interoperate with the
[`grep-searcher`](https://docs.rs/grep-searcher)
crate.
# Brief overview
The [`Standard`](struct.Standard.html) printer shows results in a human
readable format, and is modeled after the formats used by standard grep-like
tools. Features include, but are not limited to, cross platform terminal
coloring, search & replace, multi-line result handling and reporting summary
statistics.
The [`JSON`](struct.JSON.html) printer shows results in a machine readable
format. To facilitate a stream of search results, the format uses
[JSON Lines](http://jsonlines.org/)
by emitting a series of messages as search results are found.
The [`Summary`](struct.Summary.html) printer shows *aggregate* results for a
single search in a human readable format, and is modeled after similar formats
found in standard grep-like tools. This printer is useful for showing the total
number of matches and/or printing file paths that either contain or don't
contain matches.
# Example
This example shows how to create a "standard" printer and execute a search.
```
extern crate grep_regex;
extern crate grep_printer;
extern crate grep_searcher;
use std::error::Error;
use grep_regex::RegexMatcher;
use grep_printer::Standard;
use grep_searcher::Searcher;
const SHERLOCK: &'static [u8] = b"\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
can extract a clew from a wisp of straw or a flake of cigar ash;
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.
";
# fn main() { example().unwrap(); }
fn example() -> Result<(), Box<Error>> {
let matcher = RegexMatcher::new(r"Sherlock")?;
let mut printer = Standard::new_no_color(vec![]);
Searcher::new().search_slice(&matcher, SHERLOCK, printer.sink(&matcher))?;
// into_inner gives us back the underlying writer we provided to
// new_no_color, which is wrapped in a termcolor::NoColor. Thus, a second
// into_inner gives us back the actual buffer.
let output = String::from_utf8(printer.into_inner().into_inner())?;
let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
3:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(output, expected);
Ok(())
}
```
*/
#![deny(missing_docs)]
#[cfg(feature = "serde1")]
extern crate base64;
extern crate bstr;
extern crate grep_matcher;
#[cfg(test)]
extern crate grep_regex;
extern crate grep_searcher;
#[cfg(feature = "serde1")]
extern crate serde;
#[cfg(feature = "serde1")]
#[macro_use]
extern crate serde_derive;
#[cfg(feature = "serde1")]
extern crate serde_json;
extern crate termcolor;
pub use color::{default_color_specs, ColorError, ColorSpecs, UserColorSpec};
#[cfg(feature = "serde1")]
pub use json::{JSONBuilder, JSONSink, JSON};
pub use standard::{Standard, StandardBuilder, StandardSink};
pub use stats::Stats;
pub use summary::{Summary, SummaryBuilder, SummaryKind, SummarySink};
pub use util::PrinterPath;
#[macro_use]
mod macros;
mod color;
mod counter;
#[cfg(feature = "serde1")]
mod json;
#[cfg(feature = "serde1")]
mod jsont;
mod standard;
mod stats;
mod summary;
mod util;

24
crates/printer/src/macros.rs Normal file
View File

@@ -0,0 +1,24 @@
/// Like assert_eq, but nicer output for long strings.
#[cfg(test)]
#[macro_export]
macro_rules! assert_eq_printed {
($expected:expr, $got:expr) => {
let expected = &*$expected;
let got = &*$got;
if expected != got {
panic!("
printed outputs differ!
expected:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
{}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
got:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
{}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
", expected, got);
}
}
}

crates/printer/src/standard.rs Normal file
File diff suppressed because it is too large Load Diff

147
crates/printer/src/stats.rs Normal file
View File

@@ -0,0 +1,147 @@
use std::ops::{Add, AddAssign};
use std::time::Duration;
use util::NiceDuration;
/// Summary statistics produced at the end of a search.
///
/// When statistics are reported by a printer, they correspond to all searches
/// executed with that printer.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize))]
pub struct Stats {
elapsed: NiceDuration,
searches: u64,
searches_with_match: u64,
bytes_searched: u64,
bytes_printed: u64,
matched_lines: u64,
matches: u64,
}
impl Add for Stats {
type Output = Stats;
fn add(self, rhs: Stats) -> Stats {
self + &rhs
}
}
impl<'a> Add<&'a Stats> for Stats {
type Output = Stats;
fn add(self, rhs: &'a Stats) -> Stats {
Stats {
elapsed: NiceDuration(self.elapsed.0 + rhs.elapsed.0),
searches: self.searches + rhs.searches,
searches_with_match: self.searches_with_match
+ rhs.searches_with_match,
bytes_searched: self.bytes_searched + rhs.bytes_searched,
bytes_printed: self.bytes_printed + rhs.bytes_printed,
matched_lines: self.matched_lines + rhs.matched_lines,
matches: self.matches + rhs.matches,
}
}
}
impl AddAssign for Stats {
fn add_assign(&mut self, rhs: Stats) {
*self += &rhs;
}
}
impl<'a> AddAssign<&'a Stats> for Stats {
fn add_assign(&mut self, rhs: &'a Stats) {
self.elapsed.0 += rhs.elapsed.0;
self.searches += rhs.searches;
self.searches_with_match += rhs.searches_with_match;
self.bytes_searched += rhs.bytes_searched;
self.bytes_printed += rhs.bytes_printed;
self.matched_lines += rhs.matched_lines;
self.matches += rhs.matches;
}
}
impl Stats {
/// Return a new value for tracking aggregate statistics across searches.
///
/// All statistics are set to `0`.
pub fn new() -> Stats {
Stats::default()
}
/// Return the total amount of time elapsed.
pub fn elapsed(&self) -> Duration {
self.elapsed.0
}
/// Return the total number of searches executed.
pub fn searches(&self) -> u64 {
self.searches
}
/// Return the total number of searches that found at least one match.
pub fn searches_with_match(&self) -> u64 {
self.searches_with_match
}
/// Return the total number of bytes searched.
pub fn bytes_searched(&self) -> u64 {
self.bytes_searched
}
/// Return the total number of bytes printed.
pub fn bytes_printed(&self) -> u64 {
self.bytes_printed
}
/// Return the total number of lines that participated in a match.
///
/// When matches may contain multiple lines then this includes every line
/// that is part of every match.
pub fn matched_lines(&self) -> u64 {
self.matched_lines
}
/// Return the total number of matches.
///
/// There may be multiple matches per line.
pub fn matches(&self) -> u64 {
self.matches
}
/// Add to the elapsed time.
pub fn add_elapsed(&mut self, duration: Duration) {
self.elapsed.0 += duration;
}
/// Add to the number of searches executed.
pub fn add_searches(&mut self, n: u64) {
self.searches += n;
}
/// Add to the number of searches that found at least one match.
pub fn add_searches_with_match(&mut self, n: u64) {
self.searches_with_match += n;
}
/// Add to the total number of bytes searched.
pub fn add_bytes_searched(&mut self, n: u64) {
self.bytes_searched += n;
}
/// Add to the total number of bytes printed.
pub fn add_bytes_printed(&mut self, n: u64) {
self.bytes_printed += n;
}
/// Add to the total number of lines that participated in a match.
pub fn add_matched_lines(&mut self, n: u64) {
self.matched_lines += n;
}
/// Add to the total number of matches.
pub fn add_matches(&mut self, n: u64) {
self.matches += n;
}
}
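// A minimal sketch (not part of the original file) showing how the `Add`
// and `AddAssign` impls above merge per-search statistics into a grand
// total; the values are illustrative.
#[cfg(test)]
mod example_tests {
    use super::Stats;

    #[test]
    fn merge() {
        let mut per_search = Stats::new();
        per_search.add_searches(1);
        per_search.add_matches(2);

        let mut total = Stats::new();
        total += &per_search; // `AddAssign<&Stats>` avoids a clone
        total += &per_search;
        assert_eq!(2, total.searches());
        assert_eq!(4, total.matches());
    }
}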

crates/printer/src/summary.rs Normal file
File diff suppressed because it is too large Load Diff

359
crates/printer/src/util.rs Normal file
View File

@@ -0,0 +1,359 @@
use std::borrow::Cow;
use std::fmt;
use std::io;
use std::path::Path;
use std::time;
use bstr::{ByteSlice, ByteVec};
use grep_matcher::{Captures, LineTerminator, Match, Matcher};
use grep_searcher::{
LineIter, SinkContext, SinkContextKind, SinkError, SinkMatch,
};
#[cfg(feature = "serde1")]
use serde::{Serialize, Serializer};
/// A type for handling replacements while amortizing allocation.
pub struct Replacer<M: Matcher> {
space: Option<Space<M>>,
}
struct Space<M: Matcher> {
/// The place to store capture locations.
caps: M::Captures,
/// The place to write a replacement to.
dst: Vec<u8>,
/// The place to store match offsets in terms of `dst`.
matches: Vec<Match>,
}
impl<M: Matcher> fmt::Debug for Replacer<M> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let (dst, matches) = self.replacement().unwrap_or((&[], &[]));
f.debug_struct("Replacer")
.field("dst", &dst)
.field("matches", &matches)
.finish()
}
}
impl<M: Matcher> Replacer<M> {
/// Create a new replacer for use with a particular matcher.
///
/// This constructor does not allocate. Instead, space for dealing with
/// replacements is allocated lazily only when needed.
pub fn new() -> Replacer<M> {
Replacer { space: None }
}
/// Executes a replacement on the given subject string by replacing all
/// matches with the given replacement. To access the result of the
/// replacement, use the `replacement` method.
///
/// This can fail if the underlying matcher reports an error.
pub fn replace_all<'a>(
&'a mut self,
matcher: &M,
subject: &[u8],
replacement: &[u8],
) -> io::Result<()> {
{
let &mut Space { ref mut dst, ref mut caps, ref mut matches } =
self.allocate(matcher)?;
dst.clear();
matches.clear();
matcher
.replace_with_captures(subject, caps, dst, |caps, dst| {
let start = dst.len();
caps.interpolate(
|name| matcher.capture_index(name),
subject,
replacement,
dst,
);
let end = dst.len();
matches.push(Match::new(start, end));
true
})
.map_err(io::Error::error_message)?;
}
Ok(())
}
/// Return the result of the prior replacement and the match offsets for
/// all replacement occurrences within the returned replacement buffer.
///
/// If no replacement has occurred then `None` is returned.
pub fn replacement<'a>(&'a self) -> Option<(&'a [u8], &'a [Match])> {
match self.space {
None => None,
Some(ref space) => {
if space.matches.is_empty() {
None
} else {
Some((&space.dst, &space.matches))
}
}
}
}
/// Clear space used for performing a replacement.
///
/// Subsequent calls to `replacement` after calling `clear` (but before
/// executing another replacement) will always return `None`.
pub fn clear(&mut self) {
if let Some(ref mut space) = self.space {
space.dst.clear();
space.matches.clear();
}
}
/// Allocate space for replacements when used with the given matcher and
/// return a mutable reference to that space.
///
/// This can fail if allocating space for capture locations from the given
/// matcher fails.
fn allocate(&mut self, matcher: &M) -> io::Result<&mut Space<M>> {
if self.space.is_none() {
let caps =
matcher.new_captures().map_err(io::Error::error_message)?;
self.space =
Some(Space { caps: caps, dst: vec![], matches: vec![] });
}
Ok(self.space.as_mut().unwrap())
}
}
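// A minimal usage sketch (not part of the original file; `matcher` stands
// in for any `grep_matcher::Matcher` implementation):
//
//     let mut replacer = Replacer::new();
//     replacer.replace_all(&matcher, b"foo foo", b"bar")?;
//     if let Some((dst, offsets)) = replacer.replacement() {
//         // `dst` holds the rewritten bytes (here b"bar bar") and
//         // `offsets` locates each interpolated replacement within `dst`.
//     }
//     replacer.clear(); // `replacement()` returns `None` again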
/// A simple layer of abstraction over either a match or a contextual line
/// reported by the searcher.
///
/// In particular, this provides an API that unions the `SinkMatch` and
/// `SinkContext` types while also exposing a list of all individual match
/// locations.
///
/// While this serves as a convenient mechanism to abstract over `SinkMatch`
/// and `SinkContext`, this also provides a way to abstract over replacements.
/// Namely, after a replacement, a `Sunk` value can be constructed using the
/// results of the replacement instead of the bytes reported directly by the
/// searcher.
#[derive(Debug)]
pub struct Sunk<'a> {
bytes: &'a [u8],
absolute_byte_offset: u64,
line_number: Option<u64>,
context_kind: Option<&'a SinkContextKind>,
matches: &'a [Match],
original_matches: &'a [Match],
}
impl<'a> Sunk<'a> {
#[inline]
pub fn empty() -> Sunk<'static> {
Sunk {
bytes: &[],
absolute_byte_offset: 0,
line_number: None,
context_kind: None,
matches: &[],
original_matches: &[],
}
}
#[inline]
pub fn from_sink_match(
sunk: &'a SinkMatch<'a>,
original_matches: &'a [Match],
replacement: Option<(&'a [u8], &'a [Match])>,
) -> Sunk<'a> {
let (bytes, matches) =
replacement.unwrap_or_else(|| (sunk.bytes(), original_matches));
Sunk {
bytes: bytes,
absolute_byte_offset: sunk.absolute_byte_offset(),
line_number: sunk.line_number(),
context_kind: None,
matches: matches,
original_matches: original_matches,
}
}
#[inline]
pub fn from_sink_context(
sunk: &'a SinkContext<'a>,
original_matches: &'a [Match],
replacement: Option<(&'a [u8], &'a [Match])>,
) -> Sunk<'a> {
let (bytes, matches) =
replacement.unwrap_or_else(|| (sunk.bytes(), original_matches));
Sunk {
bytes: bytes,
absolute_byte_offset: sunk.absolute_byte_offset(),
line_number: sunk.line_number(),
context_kind: Some(sunk.kind()),
matches: matches,
original_matches: original_matches,
}
}
#[inline]
pub fn context_kind(&self) -> Option<&'a SinkContextKind> {
self.context_kind
}
#[inline]
pub fn bytes(&self) -> &'a [u8] {
self.bytes
}
#[inline]
pub fn matches(&self) -> &'a [Match] {
self.matches
}
#[inline]
pub fn original_matches(&self) -> &'a [Match] {
self.original_matches
}
#[inline]
pub fn lines(&self, line_term: u8) -> LineIter<'a> {
LineIter::new(line_term, self.bytes())
}
#[inline]
pub fn absolute_byte_offset(&self) -> u64 {
self.absolute_byte_offset
}
#[inline]
pub fn line_number(&self) -> Option<u64> {
self.line_number
}
}
/// A simple encapsulation of a file path used by a printer.
///
/// This represents any transforms that we might want to perform on the path,
/// such as converting it to valid UTF-8 and/or replacing its separator with
/// something else. This allows us to amortize work if we are printing the
/// file path for every match.
///
/// In the common case, no transformation is needed, which lets us avoid the
/// allocation. Typically, only Windows requires a transform, since we can't
/// access the raw bytes of a path directly and first need to lossily convert
/// to UTF-8. Windows is also typically where the path separator replacement
/// is used, e.g., in cygwin environments to use `/` instead of `\`.
///
/// Users of this type are expected to construct it from a normal `Path`
/// found in the standard library. It can then be written to any `io::Write`
/// implementation using the `as_bytes` method. This achieves platform
/// portability with a small cost: on Windows, paths that are not valid UTF-16
/// will not roundtrip correctly.
#[derive(Clone, Debug)]
pub struct PrinterPath<'a>(Cow<'a, [u8]>);
impl<'a> PrinterPath<'a> {
/// Create a new path suitable for printing.
pub fn new(path: &'a Path) -> PrinterPath<'a> {
PrinterPath(Vec::from_path_lossy(path))
}
/// Create a new printer path from the given path which can be efficiently
/// written to a writer without allocation.
///
/// If the given separator is present, then any separators in `path` are
/// replaced with it.
pub fn with_separator(path: &'a Path, sep: Option<u8>) -> PrinterPath<'a> {
let mut ppath = PrinterPath::new(path);
if let Some(sep) = sep {
ppath.replace_separator(sep);
}
ppath
}
/// Replace the path separator in this path with the given separator
/// and do it in place. On Windows, both `/` and `\` are treated as
/// path separators that are both replaced by `new_sep`. In all other
/// environments, only `/` is treated as a path separator.
fn replace_separator(&mut self, new_sep: u8) {
let transformed_path: Vec<u8> = self
.0
.bytes()
.map(|b| {
if b == b'/' || (cfg!(windows) && b == b'\\') {
new_sep
} else {
b
}
})
.collect();
self.0 = Cow::Owned(transformed_path);
}
/// Return the raw bytes for this path.
pub fn as_bytes(&self) -> &[u8] {
&self.0
}
}
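// A minimal usage sketch (not part of the original file; `path` and `wtr`
// are illustrative):
//
//     let ppath = PrinterPath::with_separator(path, Some(b'/'));
//     wtr.write_all(ppath.as_bytes())?; // `/`-separated, even on Windows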
/// A type that provides "nicer" Display and Serialize impls for
/// std::time::Duration. The serialization format should actually be compatible
/// with the Deserialize impl for std::time::Duration, since this type only
/// adds new fields.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct NiceDuration(pub time::Duration);
impl fmt::Display for NiceDuration {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:0.6}s", self.fractional_seconds())
}
}
impl NiceDuration {
/// Returns the number of seconds in this duration in fractional form.
/// The number to the left of the decimal point is the whole number of
/// seconds, and the number to the right is the fractional part of a second.
fn fractional_seconds(&self) -> f64 {
let fractional = (self.0.subsec_nanos() as f64) / 1_000_000_000.0;
self.0.as_secs() as f64 + fractional
}
}
#[cfg(feature = "serde1")]
impl Serialize for NiceDuration {
fn serialize<S: Serializer>(&self, ser: S) -> Result<S::Ok, S::Error> {
use serde::ser::SerializeStruct;
let mut state = ser.serialize_struct("Duration", 2)?;
state.serialize_field("secs", &self.0.as_secs())?;
state.serialize_field("nanos", &self.0.subsec_nanos())?;
state.serialize_field("human", &format!("{}", self))?;
state.end()
}
}
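// Illustrative example (not part of the original file): a duration of one
// second and 500_000_000 nanoseconds displays as "1.500000s" and, with the
// "serde1" feature enabled, serializes as:
//
//     {"secs": 1, "nanos": 500000000, "human": "1.500000s"}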
/// Trim prefix ASCII whitespace from the given slice and return the
/// range.
///
/// This stops trimming a prefix as soon as it sees non-whitespace or a line
/// terminator.
pub fn trim_ascii_prefix(
line_term: LineTerminator,
slice: &[u8],
range: Match,
) -> Match {
fn is_space(b: u8) -> bool {
match b {
b'\t' | b'\n' | b'\x0B' | b'\x0C' | b'\r' | b' ' => true,
_ => false,
}
}
let count = slice[range]
.iter()
.take_while(|&&b| -> bool {
is_space(b) && !line_term.as_bytes().contains(&b)
})
.count();
range.with_start(range.start() + count)
}
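// Illustrative example (not part of the original file): for the slice
// b"  foo\n" with range 0..6 and a `\n` line terminator, the two leading
// spaces are trimmed and the returned range is 2..6; trimming stops at the
// first non-whitespace byte and never consumes a line terminator.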

22
crates/regex/Cargo.toml Normal file
View File

@@ -0,0 +1,22 @@
[package]
name = "grep-regex"
version = "0.1.5" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
Use Rust's regex library with the 'grep' crate.
"""
documentation = "https://docs.rs/grep-regex"
homepage = "https://github.com/BurntSushi/ripgrep"
repository = "https://github.com/BurntSushi/ripgrep"
readme = "README.md"
keywords = ["regex", "grep", "search", "pattern", "line"]
license = "Unlicense/MIT"
[dependencies]
aho-corasick = "0.7.3"
bstr = "0.2.10"
grep-matcher = { version = "0.1.2", path = "../grep-matcher" }
log = "0.4.5"
regex = "1.1"
regex-syntax = "0.6.5"
thread_local = "1"

21
crates/regex/LICENSE-MIT Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Andrew Gallant
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

35
crates/regex/README.md Normal file
View File

@@ -0,0 +1,35 @@
grep-regex
----------
The `grep-regex` crate provides an implementation of the `Matcher` trait from
the `grep-matcher` crate. This implementation permits Rust's regex engine to
be used in the `grep` crate for fast line oriented searching.
[![Linux build status](https://api.travis-ci.org/BurntSushi/ripgrep.svg)](https://travis-ci.org/BurntSushi/ripgrep)
[![Windows build status](https://ci.appveyor.com/api/projects/status/github/BurntSushi/ripgrep?svg=true)](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[![](https://img.shields.io/crates/v/grep-regex.svg)](https://crates.io/crates/grep-regex)
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
### Documentation
[https://docs.rs/grep-regex](https://docs.rs/grep-regex)
**NOTE:** You probably don't want to use this crate directly. Instead, you
should prefer the facade defined in the
[`grep`](https://docs.rs/grep)
crate.
### Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
grep-regex = "0.1"
```
and this to your crate root:
```rust
extern crate grep_regex;
```

24
crates/regex/UNLICENSE Normal file
View File

@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>

263
crates/regex/src/ast.rs Normal file
View File

@@ -0,0 +1,263 @@
use regex_syntax::ast::parse::Parser;
use regex_syntax::ast::{self, Ast};
/// The results of analyzing the AST of a regular expression (e.g., for
/// supporting smart case).
#[derive(Clone, Debug)]
pub struct AstAnalysis {
/// True if and only if a literal uppercase character occurs in the regex.
any_uppercase: bool,
/// True if and only if the regex contains any literal at all.
any_literal: bool,
/// True if and only if the regex consists entirely of a literal with no
/// special regex meta characters.
all_verbatim_literal: bool,
}
impl AstAnalysis {
/// Returns an `AstAnalysis` value by doing analysis on the AST of `pattern`.
///
/// If `pattern` is not a valid regular expression, then `None` is
/// returned.
#[allow(dead_code)]
pub fn from_pattern(pattern: &str) -> Option<AstAnalysis> {
Parser::new()
.parse(pattern)
.map(|ast| AstAnalysis::from_ast(&ast))
.ok()
}
/// Perform an AST analysis given the AST.
pub fn from_ast(ast: &Ast) -> AstAnalysis {
let mut analysis = AstAnalysis::new();
analysis.from_ast_impl(ast);
analysis
}
/// Returns true if and only if a literal uppercase character occurs in
/// the pattern.
///
/// For example, a pattern like `\pL` contains no uppercase literals,
/// even though `L` is uppercase and the `\pL` class contains uppercase
/// characters.
pub fn any_uppercase(&self) -> bool {
self.any_uppercase
}
/// Returns true if and only if the regex contains any literal at all.
///
/// For example, a pattern like `\pL` reports `false`, but a pattern like
/// `\pLfoo` reports `true`.
pub fn any_literal(&self) -> bool {
self.any_literal
}
/// Returns true if and only if the entire pattern is a verbatim literal
/// with no special meta characters.
///
/// When this is true, then the pattern satisfies the following law:
/// `escape(pattern) == pattern`. Notable examples where this returns
/// `false` include patterns like `a\u0061` even though `\u0061` is just
/// a literal `a`.
///
/// The purpose of this flag is to determine whether the patterns can be
/// given to non-regex substring search algorithms as-is.
#[allow(dead_code)]
pub fn all_verbatim_literal(&self) -> bool {
self.all_verbatim_literal
}
/// Creates a new `AstAnalysis` value with an initial configuration.
fn new() -> AstAnalysis {
AstAnalysis {
any_uppercase: false,
any_literal: false,
all_verbatim_literal: true,
}
}
fn from_ast_impl(&mut self, ast: &Ast) {
if self.done() {
return;
}
match *ast {
Ast::Empty(_) => {}
Ast::Flags(_)
| Ast::Dot(_)
| Ast::Assertion(_)
| Ast::Class(ast::Class::Unicode(_))
| Ast::Class(ast::Class::Perl(_)) => {
self.all_verbatim_literal = false;
}
Ast::Literal(ref x) => {
self.from_ast_literal(x);
}
Ast::Class(ast::Class::Bracketed(ref x)) => {
self.all_verbatim_literal = false;
self.from_ast_class_set(&x.kind);
}
Ast::Repetition(ref x) => {
self.all_verbatim_literal = false;
self.from_ast_impl(&x.ast);
}
Ast::Group(ref x) => {
self.all_verbatim_literal = false;
self.from_ast_impl(&x.ast);
}
Ast::Alternation(ref alt) => {
self.all_verbatim_literal = false;
for x in &alt.asts {
self.from_ast_impl(x);
}
}
Ast::Concat(ref alt) => {
for x in &alt.asts {
self.from_ast_impl(x);
}
}
}
}
fn from_ast_class_set(&mut self, ast: &ast::ClassSet) {
if self.done() {
return;
}
match *ast {
ast::ClassSet::Item(ref item) => {
self.from_ast_class_set_item(item);
}
ast::ClassSet::BinaryOp(ref x) => {
self.from_ast_class_set(&x.lhs);
self.from_ast_class_set(&x.rhs);
}
}
}
fn from_ast_class_set_item(&mut self, ast: &ast::ClassSetItem) {
if self.done() {
return;
}
match *ast {
ast::ClassSetItem::Empty(_)
| ast::ClassSetItem::Ascii(_)
| ast::ClassSetItem::Unicode(_)
| ast::ClassSetItem::Perl(_) => {}
ast::ClassSetItem::Literal(ref x) => {
self.from_ast_literal(x);
}
ast::ClassSetItem::Range(ref x) => {
self.from_ast_literal(&x.start);
self.from_ast_literal(&x.end);
}
ast::ClassSetItem::Bracketed(ref x) => {
self.from_ast_class_set(&x.kind);
}
ast::ClassSetItem::Union(ref union) => {
for x in &union.items {
self.from_ast_class_set_item(x);
}
}
}
}
fn from_ast_literal(&mut self, ast: &ast::Literal) {
if ast.kind != ast::LiteralKind::Verbatim {
self.all_verbatim_literal = false;
}
self.any_literal = true;
self.any_uppercase = self.any_uppercase || ast.c.is_uppercase();
}
/// Returns true if and only if the attributes can never change no matter
/// what other AST it might see.
fn done(&self) -> bool {
self.any_uppercase && self.any_literal && !self.all_verbatim_literal
}
}
#[cfg(test)]
mod tests {
use super::*;
fn analysis(pattern: &str) -> AstAnalysis {
AstAnalysis::from_pattern(pattern).unwrap()
}
#[test]
fn various() {
let x = analysis("");
assert!(!x.any_uppercase);
assert!(!x.any_literal);
assert!(x.all_verbatim_literal);
let x = analysis("foo");
assert!(!x.any_uppercase);
assert!(x.any_literal);
assert!(x.all_verbatim_literal);
let x = analysis("Foo");
assert!(x.any_uppercase);
assert!(x.any_literal);
assert!(x.all_verbatim_literal);
let x = analysis("foO");
assert!(x.any_uppercase);
assert!(x.any_literal);
assert!(x.all_verbatim_literal);
let x = analysis(r"foo\\");
assert!(!x.any_uppercase);
assert!(x.any_literal);
assert!(!x.all_verbatim_literal);
let x = analysis(r"foo\w");
assert!(!x.any_uppercase);
assert!(x.any_literal);
assert!(!x.all_verbatim_literal);
let x = analysis(r"foo\S");
assert!(!x.any_uppercase);
assert!(x.any_literal);
assert!(!x.all_verbatim_literal);
let x = analysis(r"foo\p{Ll}");
assert!(!x.any_uppercase);
assert!(x.any_literal);
assert!(!x.all_verbatim_literal);
let x = analysis(r"foo[a-z]");
assert!(!x.any_uppercase);
assert!(x.any_literal);
assert!(!x.all_verbatim_literal);
let x = analysis(r"foo[A-Z]");
assert!(x.any_uppercase);
assert!(x.any_literal);
assert!(!x.all_verbatim_literal);
let x = analysis(r"foo[\S\t]");
assert!(!x.any_uppercase);
assert!(x.any_literal);
assert!(!x.all_verbatim_literal);
let x = analysis(r"foo\\S");
assert!(x.any_uppercase);
assert!(x.any_literal);
assert!(!x.all_verbatim_literal);
let x = analysis(r"\p{Ll}");
assert!(!x.any_uppercase);
assert!(!x.any_literal);
assert!(!x.all_verbatim_literal);
let x = analysis(r"aBc\w");
assert!(x.any_uppercase);
assert!(x.any_literal);
assert!(!x.all_verbatim_literal);
let x = analysis(r"a\u0061");
assert!(!x.any_uppercase);
assert!(x.any_literal);
assert!(!x.all_verbatim_literal);
}
}

288
crates/regex/src/config.rs Normal file

@@ -0,0 +1,288 @@
use grep_matcher::{ByteSet, LineTerminator};
use regex::bytes::{Regex, RegexBuilder};
use regex_syntax::ast::{self, Ast};
use regex_syntax::hir::{self, Hir};
use ast::AstAnalysis;
use crlf::crlfify;
use error::Error;
use literal::LiteralSets;
use multi::alternation_literals;
use non_matching::non_matching_bytes;
use strip::strip_from_match;
/// Config represents the configuration of a regex matcher in this crate.
/// The configuration is itself a rough combination of the knobs found in
/// the `regex` crate itself, along with additional `grep-matcher` specific
/// options.
///
/// The configuration can be used to build a "configured" HIR expression. A
/// configured HIR expression is an HIR expression that is aware of the
/// configuration which generated it, and provides transformations on that HIR
/// such that the configuration is preserved.
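///
/// For example (as the tests in this crate do), a configured HIR can be
/// built via `Config::default().hir(pattern)`.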
#[derive(Clone, Debug)]
pub struct Config {
pub case_insensitive: bool,
pub case_smart: bool,
pub multi_line: bool,
pub dot_matches_new_line: bool,
pub swap_greed: bool,
pub ignore_whitespace: bool,
pub unicode: bool,
pub octal: bool,
pub size_limit: usize,
pub dfa_size_limit: usize,
pub nest_limit: u32,
pub line_terminator: Option<LineTerminator>,
pub crlf: bool,
pub word: bool,
}
impl Default for Config {
fn default() -> Config {
Config {
case_insensitive: false,
case_smart: false,
multi_line: false,
dot_matches_new_line: false,
swap_greed: false,
ignore_whitespace: false,
unicode: true,
octal: false,
// These size limits are much bigger than what's in the regex
// crate.
size_limit: 100 * (1 << 20),
dfa_size_limit: 1000 * (1 << 20),
nest_limit: 250,
line_terminator: None,
crlf: false,
word: false,
}
}
}
impl Config {
/// Parse the given pattern and return its HIR expression along with
/// the current configuration.
///
/// If there was a problem parsing the given expression then an error
/// is returned.
pub fn hir(&self, pattern: &str) -> Result<ConfiguredHIR, Error> {
let ast = self.ast(pattern)?;
let analysis = self.analysis(&ast)?;
let expr = hir::translate::TranslatorBuilder::new()
.allow_invalid_utf8(true)
.case_insensitive(self.is_case_insensitive(&analysis))
.multi_line(self.multi_line)
.dot_matches_new_line(self.dot_matches_new_line)
.swap_greed(self.swap_greed)
.unicode(self.unicode)
.build()
.translate(pattern, &ast)
.map_err(Error::regex)?;
let expr = match self.line_terminator {
None => expr,
Some(line_term) => strip_from_match(expr, line_term)?,
};
Ok(ConfiguredHIR {
original: pattern.to_string(),
config: self.clone(),
analysis: analysis,
// If CRLF mode is enabled, replace `$` with `(?:\r?$)`.
expr: if self.crlf { crlfify(expr) } else { expr },
})
}
/// Accounting for the `smart_case` config knob, return true if and only if
/// this pattern should be matched case insensitively.
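///
/// For example, with smart case enabled, a pattern like `foo` is matched
/// case insensitively while `Foo` is matched case sensitively.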
fn is_case_insensitive(&self, analysis: &AstAnalysis) -> bool {
if self.case_insensitive {
return true;
}
if !self.case_smart {
return false;
}
analysis.any_literal() && !analysis.any_uppercase()
}
/// Returns true if and only if this config is simple enough such that
/// if the pattern is a simple alternation of literals, then it can be
/// constructed via a plain Aho-Corasick automaton.
///
/// Note that it is OK to return true even when settings like `multi_line`
/// are enabled, since if multi-line can impact the match semantics of a
/// regex, then it is by definition not a simple alternation of literals.
pub fn can_plain_aho_corasick(&self) -> bool {
!self.word && !self.case_insensitive && !self.case_smart
}
/// Perform analysis on the AST of this pattern.
///
/// This returns an error if the given pattern failed to parse.
fn analysis(&self, ast: &Ast) -> Result<AstAnalysis, Error> {
Ok(AstAnalysis::from_ast(ast))
}
/// Parse the given pattern into its abstract syntax.
///
/// This returns an error if the given pattern failed to parse.
fn ast(&self, pattern: &str) -> Result<Ast, Error> {
ast::parse::ParserBuilder::new()
.nest_limit(self.nest_limit)
.octal(self.octal)
.ignore_whitespace(self.ignore_whitespace)
.build()
.parse(pattern)
.map_err(Error::regex)
}
}
/// A "configured" HIR expression, which is aware of the configuration which
/// produced this HIR.
///
/// Since the configuration is tracked, values with this type can be
/// transformed into other HIR expressions (or regular expressions) in a way
/// that preserves the configuration. For example, the `fast_line_regex`
/// method will apply literal extraction to the inner HIR and use that to build
/// a new regex that matches the extracted literals in a way that is
/// consistent with the configuration that produced this HIR. For example, the
/// size limits set on the configured HIR will be propagated out to any
/// subsequently constructed HIR or regular expression.
#[derive(Clone, Debug)]
pub struct ConfiguredHIR {
original: String,
config: Config,
analysis: AstAnalysis,
expr: Hir,
}
impl ConfiguredHIR {
/// Return the configuration for this HIR expression.
pub fn config(&self) -> &Config {
&self.config
}
/// Compute the set of non-matching bytes for this HIR expression.
pub fn non_matching_bytes(&self) -> ByteSet {
non_matching_bytes(&self.expr)
}
/// Returns true if and only if this regex needs to have its match offsets
/// tweaked because of CRLF support. Specifically, this occurs when the
/// CRLF hack is enabled and the regex is line anchored at the end. In
/// this case, matches that end with a `\r` have the `\r` stripped.
pub fn needs_crlf_stripped(&self) -> bool {
self.config.crlf && self.expr.is_line_anchored_end()
}
/// Builds a regular expression from this HIR expression.
pub fn regex(&self) -> Result<Regex, Error> {
self.pattern_to_regex(&self.expr.to_string())
}
/// If this HIR corresponds to an alternation of literals with no
/// capturing groups, then this returns those literals.
pub fn alternation_literals(&self) -> Option<Vec<Vec<u8>>> {
if !self.config.can_plain_aho_corasick() {
return None;
}
alternation_literals(&self.expr)
}
/// Applies the given function to the concrete syntax of this HIR and then
/// generates a new HIR based on the result of the function in a way that
/// preserves the configuration.
///
/// For example, this can be used to wrap a user provided regular
/// expression with additional semantics. e.g., See the `WordMatcher`.
pub fn with_pattern<F: FnMut(&str) -> String>(
&self,
mut f: F,
) -> Result<ConfiguredHIR, Error> {
self.pattern_to_hir(&f(&self.expr.to_string()))
}
/// If the current configuration has a line terminator set and if useful
/// literals could be extracted, then a regular expression matching those
/// literals is returned. If no line terminator is set, then `None` is
/// returned.
///
/// If compiling the resulting regular expression failed, then an error
/// is returned.
///
/// This method only returns something when a line terminator is set
/// because matches from this regex are generally candidates that must be
/// confirmed before reporting a match. When performing a line oriented
/// search, confirmation is easy: just extend the candidate match to its
/// respective line boundaries and then re-search that line for a full
/// match. This only works when the line terminator is set because the line
/// terminator setting guarantees that the regex itself can never match
/// through the line terminator byte.
pub fn fast_line_regex(&self) -> Result<Option<Regex>, Error> {
if self.config.line_terminator.is_none() {
return Ok(None);
}
match LiteralSets::new(&self.expr).one_regex(self.config.word) {
None => Ok(None),
Some(pattern) => self.pattern_to_regex(&pattern).map(Some),
}
}
/// Create a regex from the given pattern using this HIR's configuration.
fn pattern_to_regex(&self, pattern: &str) -> Result<Regex, Error> {
// The settings we explicitly set here are intentionally a subset
// of the settings we have. The key point here is that our HIR
// expression is computed with the settings in mind, such that setting
// them here could actually lead to unintended behavior. For example,
// consider the pattern `(?U)a+`. This will get folded into the HIR
// as a non-greedy repetition operator which will in turn get printed
// to the concrete syntax as `a+?`, which is correct. But if we
// set the `swap_greed` option again, then we'll wind up with `(?U)a+?`
// which is equal to `a+` which is not the same as what we were given.
//
// We also don't need to apply `case_insensitive` since this gets
// folded into the HIR and would just cause us to do redundant work.
//
// Finally, we don't need to set `ignore_whitespace` since the concrete
// syntax emitted by the HIR printer never needs it.
//
// We set the rest of the options. Some of them are important, such as
// the size limit, and some of them are necessary to preserve the
// intention of the original pattern. For example, the Unicode flag
// will impact how the WordMatcher functions, namely, whether its
// word boundaries are Unicode aware or not.
RegexBuilder::new(&pattern)
.nest_limit(self.config.nest_limit)
.octal(self.config.octal)
.multi_line(self.config.multi_line)
.dot_matches_new_line(self.config.dot_matches_new_line)
.unicode(self.config.unicode)
.size_limit(self.config.size_limit)
.dfa_size_limit(self.config.dfa_size_limit)
.build()
.map_err(Error::regex)
}
/// Create an HIR expression from the given pattern using this HIR's
/// configuration.
fn pattern_to_hir(&self, pattern: &str) -> Result<ConfiguredHIR, Error> {
// See `pattern_to_regex` comment for explanation of why we only set
// a subset of knobs here. e.g., `swap_greed` is explicitly left out.
let expr = ::regex_syntax::ParserBuilder::new()
.nest_limit(self.config.nest_limit)
.octal(self.config.octal)
.allow_invalid_utf8(true)
.multi_line(self.config.multi_line)
.dot_matches_new_line(self.config.dot_matches_new_line)
.unicode(self.config.unicode)
.build()
.parse(pattern)
.map_err(Error::regex)?;
Ok(ConfiguredHIR {
original: self.original.clone(),
config: self.config.clone(),
analysis: self.analysis.clone(),
expr: expr,
})
}
}

189
crates/regex/src/crlf.rs Normal file

@@ -0,0 +1,189 @@
use std::collections::HashMap;
use grep_matcher::{Match, Matcher, NoError};
use regex::bytes::Regex;
use regex_syntax::hir::{self, Hir, HirKind};
use config::ConfiguredHIR;
use error::Error;
use matcher::RegexCaptures;
/// A matcher for implementing "word match" semantics.
#[derive(Clone, Debug)]
pub struct CRLFMatcher {
/// The regex.
regex: Regex,
/// A map from capture group name to capture group index.
names: HashMap<String, usize>,
}
impl CRLFMatcher {
/// Create a new matcher from the given pattern that strips `\r` from the
/// end of every match.
///
/// This panics if the given expression doesn't need its CRLF stripped.
pub fn new(expr: &ConfiguredHIR) -> Result<CRLFMatcher, Error> {
assert!(expr.needs_crlf_stripped());
let regex = expr.regex()?;
let mut names = HashMap::new();
for (i, optional_name) in regex.capture_names().enumerate() {
if let Some(name) = optional_name {
names.insert(name.to_string(), i.checked_sub(1).unwrap());
}
}
Ok(CRLFMatcher { regex, names })
}
/// Return the underlying regex used by this matcher.
pub fn regex(&self) -> &Regex {
&self.regex
}
}
impl Matcher for CRLFMatcher {
type Captures = RegexCaptures;
type Error = NoError;
fn find_at(
&self,
haystack: &[u8],
at: usize,
) -> Result<Option<Match>, NoError> {
let m = match self.regex.find_at(haystack, at) {
None => return Ok(None),
Some(m) => Match::new(m.start(), m.end()),
};
Ok(Some(adjust_match(haystack, m)))
}
fn new_captures(&self) -> Result<RegexCaptures, NoError> {
Ok(RegexCaptures::new(self.regex.capture_locations()))
}
fn capture_count(&self) -> usize {
self.regex.captures_len().checked_sub(1).unwrap()
}
fn capture_index(&self, name: &str) -> Option<usize> {
self.names.get(name).map(|i| *i)
}
fn captures_at(
&self,
haystack: &[u8],
at: usize,
caps: &mut RegexCaptures,
) -> Result<bool, NoError> {
caps.strip_crlf(false);
let r =
self.regex.captures_read_at(caps.locations_mut(), haystack, at);
if r.is_none() {
return Ok(false);
}
// If the end of our match includes a `\r`, then strip it from all
// capture groups ending at the same location.
let end = caps.locations().get(0).unwrap().1;
if end > 0 && haystack.get(end - 1) == Some(&b'\r') {
caps.strip_crlf(true);
}
Ok(true)
}
// We specifically do not implement other methods like find_iter or
// captures_iter. Namely, the iter methods are guaranteed to be correct
// by virtue of implementing find_at and captures_at above.
}
/// If the given match ends with a `\r`, then return a new match that ends
/// immediately before the `\r`.
pub fn adjust_match(haystack: &[u8], m: Match) -> Match {
if m.end() > 0 && haystack.get(m.end() - 1) == Some(&b'\r') {
m.with_end(m.end() - 1)
} else {
m
}
}
/// Substitutes all occurrences of multi-line enabled `$` with `(?:\r?$)`.
///
/// This does not preserve the exact semantics of the given expression;
/// however, it does have the useful property that anything that matched the
/// given expression will also match the returned expression. The difference is
/// that the returned expression can match possibly other things as well.
///
/// The principal reason why we do this is that the underlying regex engine
/// doesn't support CRLF-aware `$` look-around. It's planned to fix this at
/// that level, but we perform this kludge in the meantime.
///
/// Note that while the match preserving semantics are nice and neat, the
/// match position semantics are quite a bit messier. Namely, `$` only ever
/// matches the position between characters, whereas `\r??` can match a
/// character and change the offset. This is regrettable, but works out pretty
/// nicely in most cases, especially when a match is limited to a single line.
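///
/// For example (per the tests below), `(?m)$` is rewritten to
/// `(?:\r??(?m:$))`.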
pub fn crlfify(expr: Hir) -> Hir {
match expr.into_kind() {
HirKind::Anchor(hir::Anchor::EndLine) => {
let concat = Hir::concat(vec![
Hir::repetition(hir::Repetition {
kind: hir::RepetitionKind::ZeroOrOne,
greedy: false,
hir: Box::new(Hir::literal(hir::Literal::Unicode('\r'))),
}),
Hir::anchor(hir::Anchor::EndLine),
]);
Hir::group(hir::Group {
kind: hir::GroupKind::NonCapturing,
hir: Box::new(concat),
})
}
HirKind::Empty => Hir::empty(),
HirKind::Literal(x) => Hir::literal(x),
HirKind::Class(x) => Hir::class(x),
HirKind::Anchor(x) => Hir::anchor(x),
HirKind::WordBoundary(x) => Hir::word_boundary(x),
HirKind::Repetition(mut x) => {
x.hir = Box::new(crlfify(*x.hir));
Hir::repetition(x)
}
HirKind::Group(mut x) => {
x.hir = Box::new(crlfify(*x.hir));
Hir::group(x)
}
HirKind::Concat(xs) => {
Hir::concat(xs.into_iter().map(crlfify).collect())
}
HirKind::Alternation(xs) => {
Hir::alternation(xs.into_iter().map(crlfify).collect())
}
}
}
#[cfg(test)]
mod tests {
use super::crlfify;
use regex_syntax::Parser;
fn roundtrip(pattern: &str) -> String {
let expr1 = Parser::new().parse(pattern).unwrap();
let expr2 = crlfify(expr1);
expr2.to_string()
}
#[test]
fn various() {
assert_eq!(roundtrip(r"(?m)$"), "(?:\r??(?m:$))");
assert_eq!(roundtrip(r"(?m)$$"), "(?:\r??(?m:$))(?:\r??(?m:$))");
assert_eq!(
roundtrip(r"(?m)(?:foo$|bar$)"),
"(?:foo(?:\r??(?m:$))|bar(?:\r??(?m:$)))"
);
assert_eq!(roundtrip(r"(?m)$a"), "(?:\r??(?m:$))a");
// Not a multiline `$`, so no crlfifying occurs.
assert_eq!(roundtrip(r"$"), "\\z");
// It's a literal, derp.
assert_eq!(roundtrip(r"\$"), "\\$");
}
}

88
crates/regex/src/error.rs Normal file

@@ -0,0 +1,88 @@
use std::error;
use std::fmt;
use util;
/// An error that can occur in this crate.
///
/// Generally, this error corresponds to problems building a regular
/// expression, whether it's in parsing, compilation or a problem with
/// guaranteeing a configured optimization.
#[derive(Clone, Debug)]
pub struct Error {
kind: ErrorKind,
}
impl Error {
pub(crate) fn new(kind: ErrorKind) -> Error {
Error { kind }
}
pub(crate) fn regex<E: error::Error>(err: E) -> Error {
Error { kind: ErrorKind::Regex(err.to_string()) }
}
/// Return the kind of this error.
pub fn kind(&self) -> &ErrorKind {
&self.kind
}
}
/// The kind of an error that can occur.
#[derive(Clone, Debug)]
pub enum ErrorKind {
/// An error that occurred as a result of parsing a regular expression.
/// This can be a syntax error or an error that results from attempting to
/// compile a regular expression that is too big.
///
/// The string here is the underlying error converted to a string.
Regex(String),
/// An error that occurs when building a regex that isn't permitted to
/// match a line terminator. In general, building the regex will do its
/// best to make matching a line terminator impossible (e.g., by removing
/// `\n` from the `\s` character class), but if the regex contains a
/// `\n` literal, then there is no reasonable choice that can be made and
/// therefore an error is reported.
///
/// The string is the literal sequence found in the regex that is not
/// allowed.
NotAllowed(String),
/// This error occurs when a non-ASCII line terminator was provided.
///
/// The invalid byte is included in this error.
InvalidLineTerminator(u8),
/// Hints that destructuring should not be exhaustive.
///
/// This enum may grow additional variants, so this makes sure clients
/// don't count on exhaustive matching. (Otherwise, adding a new variant
/// could break existing code.)
#[doc(hidden)]
__Nonexhaustive,
}
impl error::Error for Error {
fn description(&self) -> &str {
match self.kind {
ErrorKind::Regex(_) => "regex error",
ErrorKind::NotAllowed(_) => "literal not allowed",
ErrorKind::InvalidLineTerminator(_) => "invalid line terminator",
ErrorKind::__Nonexhaustive => unreachable!(),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.kind {
ErrorKind::Regex(ref s) => write!(f, "{}", s),
ErrorKind::NotAllowed(ref lit) => {
write!(f, "the literal '{:?}' is not allowed in a regex", lit)
}
ErrorKind::InvalidLineTerminator(byte) => {
let x = util::show_bytes(&[byte]);
write!(f, "line terminators must be ASCII, but '{}' is not", x)
}
ErrorKind::__Nonexhaustive => unreachable!(),
}
}
}

28
crates/regex/src/lib.rs Normal file

@@ -0,0 +1,28 @@
/*!
An implementation of `grep-matcher`'s `Matcher` trait for Rust's regex engine.
*/
#![deny(missing_docs)]
extern crate aho_corasick;
extern crate bstr;
extern crate grep_matcher;
#[macro_use]
extern crate log;
extern crate regex;
extern crate regex_syntax;
extern crate thread_local;
pub use error::{Error, ErrorKind};
pub use matcher::{RegexCaptures, RegexMatcher, RegexMatcherBuilder};
mod ast;
mod config;
mod crlf;
mod error;
mod literal;
mod matcher;
mod multi;
mod non_matching;
mod strip;
mod util;
mod word;

391
crates/regex/src/literal.rs Normal file

@@ -0,0 +1,391 @@
/*
This module is responsible for extracting *inner* literals out of the AST of a
regular expression. Normally this is the job of the regex engine itself, but
the regex engine doesn't look for inner literals. Since we're doing line based
searching, we can use them, so we need to do it ourselves.
*/
use regex_syntax::hir::literal::{Literal, Literals};
use regex_syntax::hir::{self, Hir, HirKind};
use util;
/// Represents prefix, suffix and inner "required" literals for a regular
/// expression.
///
/// Prefixes and suffixes are detected using regex-syntax. The inner required
/// literals are detected using something custom (but based on the code in
/// regex-syntax).
#[derive(Clone, Debug)]
pub struct LiteralSets {
/// A set of prefix literals.
prefixes: Literals,
/// A set of suffix literals.
suffixes: Literals,
/// A set of literals such that at least one of them must appear in every
/// match. A literal in this set may be neither a prefix nor a suffix.
required: Literals,
}
impl LiteralSets {
/// Create a set of literals from the given HIR expression.
pub fn new(expr: &Hir) -> LiteralSets {
let mut required = Literals::empty();
union_required(expr, &mut required);
LiteralSets {
prefixes: Literals::prefixes(expr),
suffixes: Literals::suffixes(expr),
required: required,
}
}
/// If it is deemed advantageous to do so (via various suspicious
/// heuristics), this will return a single regular expression pattern that
/// matches a subset of the language matched by the regular expression that
/// generated these literal sets. The idea here is that the pattern
/// returned by this method is much cheaper to search for; i.e., it is
/// usually a single literal or an alternation of literals.
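///
/// For example (per the tests below), the pattern `\wabc\w` yields
/// `(?-u:abc)`.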
pub fn one_regex(&self, word: bool) -> Option<String> {
// TODO: The logic in this function is basically inscrutable. It grew
// organically in the old grep 0.1 crate. Ideally, it would be
// re-worked. In fact, the entire inner literal extraction should be
// re-worked. Actually, most of regex-syntax's literal extraction
// should also be re-worked. Alas... only so much time in the day.
if !word {
if self.prefixes.all_complete() && !self.prefixes.is_empty() {
debug!("literal prefixes detected: {:?}", self.prefixes);
// When this is true, the regex engine will do a literal scan,
// so we don't need to return anything. But we only do this
// if we aren't doing a word regex, since a word regex adds
// a `(?:\W|^)` to the beginning of the regex, thereby
// defeating the regex engine's literal detection.
return None;
}
}
// Out of inner required literals, prefixes and suffixes, which one
// is the longest? We pick the longest to do fast literal scan under
// the assumption that a longer literal will have a lower false
// positive rate.
let pre_lcp = self.prefixes.longest_common_prefix();
let pre_lcs = self.prefixes.longest_common_suffix();
let suf_lcp = self.suffixes.longest_common_prefix();
let suf_lcs = self.suffixes.longest_common_suffix();
let req_lits = self.required.literals();
let req = match req_lits.iter().max_by_key(|lit| lit.len()) {
None => &[],
Some(req) => &***req,
};
let mut lit = pre_lcp;
if pre_lcs.len() > lit.len() {
lit = pre_lcs;
}
if suf_lcp.len() > lit.len() {
lit = suf_lcp;
}
if suf_lcs.len() > lit.len() {
lit = suf_lcs;
}
if req_lits.len() == 1 && req.len() > lit.len() {
lit = req;
}
// Special case: if we detected an alternation of inner required
// literals and its longest literal is bigger than the longest
// prefix/suffix, then choose the alternation. In practice, this
// helps with case insensitive matching, which can generate lots of
// inner required literals.
let any_empty = req_lits.iter().any(|lit| lit.is_empty());
if req.len() > lit.len() && req_lits.len() > 1 && !any_empty {
debug!("required literals found: {:?}", req_lits);
let alts: Vec<String> = req_lits
.into_iter()
.map(|x| util::bytes_to_regex(x))
.collect();
// We're matching raw bytes, so disable Unicode mode.
Some(format!("(?-u:{})", alts.join("|")))
} else if lit.is_empty() {
// If we're here, then we have no LCP. No LCS. And no detected
// inner required literals. In theory this shouldn't happen, but
// the inner literal detector isn't as nice as we hope and doesn't
// actually support returning a set of alternating required
// literals. (Instead, it only returns a set where EVERY literal
// in it is required. It cannot currently express "either P or Q
// is required.")
//
// In this case, it is possible that we still have meaningful
// prefixes or suffixes to use. So we look for the set of literals
// with the highest minimum length and use that to build our "fast"
// regex.
//
// This manifests in fairly common scenarios. e.g.,
//
// rg -w 'foo|bar|baz|quux'
//
// Normally, without the `-w`, the regex engine itself would
// detect the prefix correctly. Unfortunately, the `-w` option
// turns the regex into something like this:
//
// rg '(^|\W)(foo|bar|baz|quux)($|\W)'
//
// Which will defeat all prefix and suffix literal optimizations.
// (Not in theory---it could be better. But the current
// implementation isn't good enough.) ... So we make up for it
// here.
let p_min_len = self.prefixes.min_len();
let s_min_len = self.suffixes.min_len();
let lits = match (p_min_len, s_min_len) {
(None, None) => return None,
(Some(_), None) => {
debug!("prefix literals found");
self.prefixes.literals()
}
(None, Some(_)) => {
debug!("suffix literals found");
self.suffixes.literals()
}
(Some(p), Some(s)) => {
if p >= s {
debug!("prefix literals found");
self.prefixes.literals()
} else {
debug!("suffix literals found");
self.suffixes.literals()
}
}
};
debug!("prefix/suffix literals found: {:?}", lits);
let alts: Vec<String> =
lits.into_iter().map(|x| util::bytes_to_regex(x)).collect();
// We're matching raw bytes, so disable Unicode mode.
Some(format!("(?-u:{})", alts.join("|")))
} else {
debug!("required literal found: {:?}", util::show_bytes(lit));
Some(format!("(?-u:{})", util::bytes_to_regex(&lit)))
}
}
}
fn union_required(expr: &Hir, lits: &mut Literals) {
match *expr.kind() {
HirKind::Literal(hir::Literal::Unicode(c)) => {
let mut buf = [0u8; 4];
lits.cross_add(c.encode_utf8(&mut buf).as_bytes());
}
HirKind::Literal(hir::Literal::Byte(b)) => {
lits.cross_add(&[b]);
}
HirKind::Class(hir::Class::Unicode(ref cls)) => {
if count_unicode_class(cls) >= 5 || !lits.add_char_class(cls) {
lits.cut();
}
}
HirKind::Class(hir::Class::Bytes(ref cls)) => {
if count_byte_class(cls) >= 5 || !lits.add_byte_class(cls) {
lits.cut();
}
}
HirKind::Group(hir::Group { ref hir, .. }) => {
union_required(&**hir, lits);
}
HirKind::Repetition(ref x) => match x.kind {
hir::RepetitionKind::ZeroOrOne => lits.cut(),
hir::RepetitionKind::ZeroOrMore => lits.cut(),
hir::RepetitionKind::OneOrMore => {
union_required(&x.hir, lits);
}
hir::RepetitionKind::Range(ref rng) => {
let (min, max) = match *rng {
hir::RepetitionRange::Exactly(m) => (m, Some(m)),
hir::RepetitionRange::AtLeast(m) => (m, None),
hir::RepetitionRange::Bounded(m, n) => (m, Some(n)),
};
repeat_range_literals(
&x.hir,
min,
max,
x.greedy,
lits,
union_required,
);
}
},
HirKind::Concat(ref es) if es.is_empty() => {}
HirKind::Concat(ref es) if es.len() == 1 => {
union_required(&es[0], lits)
}
HirKind::Concat(ref es) => {
for e in es {
let mut lits2 = lits.to_empty();
union_required(e, &mut lits2);
if lits2.is_empty() {
lits.cut();
continue;
}
if lits2.contains_empty() || !is_simple(&e) {
lits.cut();
}
if !lits.cross_product(&lits2) || !lits2.any_complete() {
// If this expression couldn't yield any literal that
// could be extended, then we need to quit. Since we're
// short-circuiting, we also need to freeze every member.
lits.cut();
break;
}
}
}
HirKind::Alternation(ref es) => {
alternate_literals(es, lits, union_required);
}
_ => lits.cut(),
}
}
fn repeat_range_literals<F: FnMut(&Hir, &mut Literals)>(
e: &Hir,
min: u32,
_max: Option<u32>,
_greedy: bool,
lits: &mut Literals,
mut f: F,
) {
if min == 0 {
// This is a bit conservative. If `max` is set, then we could
// treat this as a finite set of alternations. For now, we
// just treat it as `e*`.
lits.cut();
} else {
// We only extract literals from a single repetition, even though
// we could do more. e.g., `a{3}` will have `a` extracted instead of
// `aaa`. The reason is that inner literal extraction can't be unioned
// across repetitions. e.g., extracting `foofoofoo` from `(\w+foo){3}`
// is wrong.
f(e, lits);
lits.cut();
}
}
fn alternate_literals<F: FnMut(&Hir, &mut Literals)>(
es: &[Hir],
lits: &mut Literals,
mut f: F,
) {
let mut lits2 = lits.to_empty();
for e in es {
let mut lits3 = lits.to_empty();
lits3.set_limit_size(lits.limit_size() / 5);
f(e, &mut lits3);
if lits3.is_empty() || !lits2.union(lits3) {
// If we couldn't find suffixes for *any* of the
// alternates, then the entire alternation has to be thrown
// away and any existing members must be frozen. Similarly,
// if the union couldn't complete, stop and freeze.
lits.cut();
return;
}
}
// All we do at the moment is look for prefixes and suffixes. If both
// are empty, then we report nothing. We should be able to do better than
// this, but we'll need something more expressive than just a "set of
// literals."
let lcp = lits2.longest_common_prefix();
let lcs = lits2.longest_common_suffix();
if !lcp.is_empty() {
lits.cross_add(lcp);
}
lits.cut();
if !lcs.is_empty() {
lits.add(Literal::empty());
lits.add(Literal::new(lcs.to_vec()));
}
}
fn is_simple(expr: &Hir) -> bool {
match *expr.kind() {
HirKind::Empty
| HirKind::Literal(_)
| HirKind::Class(_)
| HirKind::Repetition(_)
| HirKind::Concat(_)
| HirKind::Alternation(_) => true,
HirKind::Anchor(_) | HirKind::WordBoundary(_) | HirKind::Group(_) => {
false
}
}
}
/// Return the number of characters in the given class.
fn count_unicode_class(cls: &hir::ClassUnicode) -> u32 {
cls.iter().map(|r| 1 + (r.end() as u32 - r.start() as u32)).sum()
}
/// Return the number of bytes in the given class.
fn count_byte_class(cls: &hir::ClassBytes) -> u32 {
cls.iter().map(|r| 1 + (r.end() as u32 - r.start() as u32)).sum()
}
#[cfg(test)]
mod tests {
use super::LiteralSets;
use regex_syntax::Parser;
fn sets(pattern: &str) -> LiteralSets {
let hir = Parser::new().parse(pattern).unwrap();
LiteralSets::new(&hir)
}
fn one_regex(pattern: &str) -> Option<String> {
sets(pattern).one_regex(false)
}
// Put a pattern into the same format as the one returned by `one_regex`.
fn pat(pattern: &str) -> Option<String> {
Some(format!("(?-u:{})", pattern))
}
#[test]
fn various() {
// Obviously no literals.
assert!(one_regex(r"\w").is_none());
assert!(one_regex(r"\pL").is_none());
// Tantalizingly close.
assert!(one_regex(r"\w|foo").is_none());
// There's a literal, but it's better if the regex engine handles it
// internally.
assert!(one_regex(r"abc").is_none());
// Core use cases.
assert_eq!(one_regex(r"\wabc\w"), pat("abc"));
assert_eq!(one_regex(r"abc\w"), pat("abc"));
// TODO: Make these pass. We're missing some potentially big wins
// without these.
// assert_eq!(one_regex(r"\w(foo|bar|baz)"), pat("foo|bar|baz"));
// assert_eq!(one_regex(r"\w(foo|bar|baz)\w"), pat("foo|bar|baz"));
}
#[test]
fn regression_1064() {
// Regression from:
// https://github.com/BurntSushi/ripgrep/issues/1064
// assert_eq!(one_regex(r"a.*c"), pat("a"));
assert_eq!(one_regex(r"a(.*c)"), pat("a"));
}
#[test]
fn regression_1319() {
// Regression from:
// https://github.com/BurntSushi/ripgrep/issues/1319
assert_eq!(
one_regex(r"TTGAGTCCAGGAG[ATCG]{2}C"),
pat("TTGAGTCCAGGAGA|TTGAGTCCAGGAGC|\
TTGAGTCCAGGAGG|TTGAGTCCAGGAGT")
);
}
}

1066
crates/regex/src/matcher.rs Normal file

File diff suppressed because it is too large

125
crates/regex/src/multi.rs Normal file

@@ -0,0 +1,125 @@
use aho_corasick::{AhoCorasick, AhoCorasickBuilder, MatchKind};
use grep_matcher::{Match, Matcher, NoError};
use regex_syntax::hir::Hir;
use error::Error;
use matcher::RegexCaptures;
/// A matcher for an alternation of literals.
///
/// Ideally, this optimization would be pushed down into the regex engine, but
/// making this work correctly there would require quite a bit of refactoring.
/// Moreover, doing it one layer above lets us do things like, "if we
/// specifically only want to search for literals, then don't bother with
/// regex parsing at all."
#[derive(Clone, Debug)]
pub struct MultiLiteralMatcher {
/// The Aho-Corasick automaton.
ac: AhoCorasick,
}
impl MultiLiteralMatcher {
/// Create a new multi-literal matcher from the given literals.
pub fn new<B: AsRef<[u8]>>(
literals: &[B],
) -> Result<MultiLiteralMatcher, Error> {
let ac = AhoCorasickBuilder::new()
.match_kind(MatchKind::LeftmostFirst)
.auto_configure(literals)
.build_with_size::<usize, _, _>(literals)
.map_err(Error::regex)?;
Ok(MultiLiteralMatcher { ac })
}
}
impl Matcher for MultiLiteralMatcher {
type Captures = RegexCaptures;
type Error = NoError;
fn find_at(
&self,
haystack: &[u8],
at: usize,
) -> Result<Option<Match>, NoError> {
match self.ac.find(&haystack[at..]) {
None => Ok(None),
Some(m) => Ok(Some(Match::new(at + m.start(), at + m.end()))),
}
}
fn new_captures(&self) -> Result<RegexCaptures, NoError> {
Ok(RegexCaptures::simple())
}
fn capture_count(&self) -> usize {
1
}
fn capture_index(&self, _: &str) -> Option<usize> {
None
}
fn captures_at(
&self,
haystack: &[u8],
at: usize,
caps: &mut RegexCaptures,
) -> Result<bool, NoError> {
caps.set_simple(None);
let mat = self.find_at(haystack, at)?;
caps.set_simple(mat);
Ok(mat.is_some())
}
// We specifically do not implement other methods like find_iter. Namely,
// the iter methods are guaranteed to be correct by virtue of implementing
// find_at above.
}
/// Alternation literals checks if the given HIR is a simple alternation of
/// literals, and if so, returns them. Otherwise, this returns None.
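///
/// For example, `foo|bar|baz` yields the three literals, while `foo|bar+`
/// yields `None` since it contains a repetition.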
pub fn alternation_literals(expr: &Hir) -> Option<Vec<Vec<u8>>> {
use regex_syntax::hir::{HirKind, Literal};
// This is pretty hacky, but basically, if `is_alternation_literal` is
// true, then we can make several assumptions about the structure of our
// HIR. This is what justifies the `unreachable!` statements below.
if !expr.is_alternation_literal() {
return None;
}
let alts = match *expr.kind() {
HirKind::Alternation(ref alts) => alts,
_ => return None, // one literal isn't worth it
};
let extendlit = |lit: &Literal, dst: &mut Vec<u8>| match *lit {
Literal::Unicode(c) => {
let mut buf = [0; 4];
dst.extend_from_slice(c.encode_utf8(&mut buf).as_bytes());
}
Literal::Byte(b) => {
dst.push(b);
}
};
let mut lits = vec![];
for alt in alts {
let mut lit = vec![];
match *alt.kind() {
HirKind::Empty => {}
HirKind::Literal(ref x) => extendlit(x, &mut lit),
HirKind::Concat(ref exprs) => {
for e in exprs {
match *e.kind() {
HirKind::Literal(ref x) => extendlit(x, &mut lit),
_ => unreachable!("expected literal, got {:?}", e),
}
}
}
_ => unreachable!("expected literal or concat, got {:?}", alt),
}
lits.push(lit);
}
Some(lits)
}

128
crates/regex/src/non_matching.rs Normal file

@@ -0,0 +1,128 @@
use grep_matcher::ByteSet;
use regex_syntax::hir::{self, Hir, HirKind};
use regex_syntax::utf8::Utf8Sequences;
/// Return a confirmed set of non-matching bytes from the given expression.
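///
/// For example (per the tests below), for the pattern `a`, every byte except
/// `a` is in the returned set.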
pub fn non_matching_bytes(expr: &Hir) -> ByteSet {
let mut set = ByteSet::full();
remove_matching_bytes(expr, &mut set);
set
}
/// Remove any bytes from the given set that can occur in a match produced by
/// the given expression.
fn remove_matching_bytes(expr: &Hir, set: &mut ByteSet) {
match *expr.kind() {
HirKind::Empty | HirKind::Anchor(_) | HirKind::WordBoundary(_) => {}
HirKind::Literal(hir::Literal::Unicode(c)) => {
for &b in c.encode_utf8(&mut [0; 4]).as_bytes() {
set.remove(b);
}
}
HirKind::Literal(hir::Literal::Byte(b)) => {
set.remove(b);
}
HirKind::Class(hir::Class::Unicode(ref cls)) => {
for range in cls.iter() {
// This is presumably faster than encoding every codepoint
// to UTF-8 and then removing those bytes from the set.
for seq in Utf8Sequences::new(range.start(), range.end()) {
for byte_range in seq.as_slice() {
set.remove_all(byte_range.start, byte_range.end);
}
}
}
}
HirKind::Class(hir::Class::Bytes(ref cls)) => {
for range in cls.iter() {
set.remove_all(range.start(), range.end());
}
}
HirKind::Repetition(ref x) => {
remove_matching_bytes(&x.hir, set);
}
HirKind::Group(ref x) => {
remove_matching_bytes(&x.hir, set);
}
HirKind::Concat(ref xs) => {
for x in xs {
remove_matching_bytes(x, set);
}
}
HirKind::Alternation(ref xs) => {
for x in xs {
remove_matching_bytes(x, set);
}
}
}
}
#[cfg(test)]
mod tests {
use grep_matcher::ByteSet;
use regex_syntax::ParserBuilder;
use super::non_matching_bytes;
fn extract(pattern: &str) -> ByteSet {
let expr = ParserBuilder::new()
.allow_invalid_utf8(true)
.build()
.parse(pattern)
.unwrap();
non_matching_bytes(&expr)
}
fn sparse(set: &ByteSet) -> Vec<u8> {
let mut sparse_set = vec![];
for b in (0..256).map(|b| b as u8) {
if set.contains(b) {
sparse_set.push(b);
}
}
sparse_set
}
fn sparse_except(except: &[u8]) -> Vec<u8> {
let mut except_set = vec![false; 256];
for &b in except {
except_set[b as usize] = true;
}
let mut set = vec![];
for b in (0..256).map(|b| b as u8) {
if !except_set[b as usize] {
set.push(b);
}
}
set
}
#[test]
fn dot() {
assert_eq!(
sparse(&extract(".")),
vec![
b'\n', 192, 193, 245, 246, 247, 248, 249, 250, 251, 252, 253,
254, 255,
]
);
assert_eq!(
sparse(&extract("(?s).")),
vec![
192, 193, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
255,
]
);
assert_eq!(sparse(&extract("(?-u).")), vec![b'\n']);
assert_eq!(sparse(&extract("(?s-u).")), vec![]);
}
#[test]
fn literal() {
assert_eq!(sparse(&extract("a")), sparse_except(&[b'a']));
assert_eq!(sparse(&extract("")), sparse_except(&[0xE2, 0x98, 0x83]));
assert_eq!(sparse(&extract(r"\xFF")), sparse_except(&[0xC3, 0xBF]));
assert_eq!(sparse(&extract(r"(?-u)\xFF")), sparse_except(&[0xFF]));
}
}

153
crates/regex/src/strip.rs Normal file

@@ -0,0 +1,153 @@
use grep_matcher::LineTerminator;
use regex_syntax::hir::{self, Hir, HirKind};
use error::{Error, ErrorKind};
/// Return an HIR that is guaranteed to never match the given line terminator,
/// if possible.
///
/// If the transformation isn't possible, then an error is returned.
///
/// In general, if a literal line terminator occurs anywhere in the HIR, then
/// this will return an error. However, if the line terminator occurs within
/// a character class with at least one other character (that isn't also a line
/// terminator), then the line terminator is simply stripped from that class.
///
/// If the given line terminator is not ASCII, then this function returns an
/// error.
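///
/// For example (per the tests below), stripping `\n` from `[a\n]` yields
/// `[a]`, while a bare `\n` literal results in an error.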
pub fn strip_from_match(
expr: Hir,
line_term: LineTerminator,
) -> Result<Hir, Error> {
if line_term.is_crlf() {
let expr1 = strip_from_match_ascii(expr, b'\r')?;
strip_from_match_ascii(expr1, b'\n')
} else {
let b = line_term.as_byte();
if b > 0x7F {
return Err(Error::new(ErrorKind::InvalidLineTerminator(b)));
}
strip_from_match_ascii(expr, b)
}
}
/// The implementation of strip_from_match. The given byte must be ASCII. This
/// function panics otherwise.
fn strip_from_match_ascii(expr: Hir, byte: u8) -> Result<Hir, Error> {
assert!(byte <= 0x7F);
let chr = byte as char;
assert_eq!(chr.len_utf8(), 1);
let invalid = || Err(Error::new(ErrorKind::NotAllowed(chr.to_string())));
Ok(match expr.into_kind() {
HirKind::Empty => Hir::empty(),
HirKind::Literal(hir::Literal::Unicode(c)) => {
if c == chr {
return invalid();
}
Hir::literal(hir::Literal::Unicode(c))
}
HirKind::Literal(hir::Literal::Byte(b)) => {
if b as char == chr {
return invalid();
}
Hir::literal(hir::Literal::Byte(b))
}
HirKind::Class(hir::Class::Unicode(mut cls)) => {
let remove = hir::ClassUnicode::new(Some(
hir::ClassUnicodeRange::new(chr, chr),
));
cls.difference(&remove);
if cls.ranges().is_empty() {
return invalid();
}
Hir::class(hir::Class::Unicode(cls))
}
HirKind::Class(hir::Class::Bytes(mut cls)) => {
let remove = hir::ClassBytes::new(Some(
hir::ClassBytesRange::new(byte, byte),
));
cls.difference(&remove);
if cls.ranges().is_empty() {
return invalid();
}
Hir::class(hir::Class::Bytes(cls))
}
HirKind::Anchor(x) => Hir::anchor(x),
HirKind::WordBoundary(x) => Hir::word_boundary(x),
HirKind::Repetition(mut x) => {
x.hir = Box::new(strip_from_match_ascii(*x.hir, byte)?);
Hir::repetition(x)
}
HirKind::Group(mut x) => {
x.hir = Box::new(strip_from_match_ascii(*x.hir, byte)?);
Hir::group(x)
}
HirKind::Concat(xs) => {
let xs = xs
.into_iter()
.map(|e| strip_from_match_ascii(e, byte))
.collect::<Result<Vec<Hir>, Error>>()?;
Hir::concat(xs)
}
HirKind::Alternation(xs) => {
let xs = xs
.into_iter()
.map(|e| strip_from_match_ascii(e, byte))
.collect::<Result<Vec<Hir>, Error>>()?;
Hir::alternation(xs)
}
})
}
#[cfg(test)]
mod tests {
use regex_syntax::Parser;
use super::{strip_from_match, LineTerminator};
use error::Error;
fn roundtrip(pattern: &str, byte: u8) -> String {
roundtrip_line_term(pattern, LineTerminator::byte(byte)).unwrap()
}
fn roundtrip_crlf(pattern: &str) -> String {
roundtrip_line_term(pattern, LineTerminator::crlf()).unwrap()
}
fn roundtrip_err(pattern: &str, byte: u8) -> Result<String, Error> {
roundtrip_line_term(pattern, LineTerminator::byte(byte))
}
fn roundtrip_line_term(
pattern: &str,
line_term: LineTerminator,
) -> Result<String, Error> {
let expr1 = Parser::new().parse(pattern).unwrap();
let expr2 = strip_from_match(expr1, line_term)?;
Ok(expr2.to_string())
}
#[test]
fn various() {
assert_eq!(roundtrip(r"[a\n]", b'\n'), "[a]");
assert_eq!(roundtrip(r"[a\n]", b'a'), "[\n]");
assert_eq!(roundtrip_crlf(r"[a\n]"), "[a]");
assert_eq!(roundtrip_crlf(r"[a\r]"), "[a]");
assert_eq!(roundtrip_crlf(r"[a\r\n]"), "[a]");
assert_eq!(roundtrip(r"(?-u)\s", b'a'), r"(?-u:[\x09-\x0D\x20])");
assert_eq!(roundtrip(r"(?-u)\s", b'\n'), r"(?-u:[\x09\x0B-\x0D\x20])");
assert!(roundtrip_err(r"\n", b'\n').is_err());
assert!(roundtrip_err(r"abc\n", b'\n').is_err());
assert!(roundtrip_err(r"\nabc", b'\n').is_err());
assert!(roundtrip_err(r"abc\nxyz", b'\n').is_err());
assert!(roundtrip_err(r"\x0A", b'\n').is_err());
assert!(roundtrip_err(r"\u000A", b'\n').is_err());
assert!(roundtrip_err(r"\U0000000A", b'\n').is_err());
assert!(roundtrip_err(r"\u{A}", b'\n').is_err());
assert!(roundtrip_err("\n", b'\n').is_err());
}
}

29
crates/regex/src/util.rs Normal file

@@ -0,0 +1,29 @@
/// Converts an arbitrary sequence of bytes to a literal suitable for building
/// a regular expression.
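///
/// ASCII bytes that are not regex meta characters are emitted as-is and all
/// other bytes are hex escaped. For example, `b"foo\xFF"` becomes `foo\xff`.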
pub fn bytes_to_regex(bs: &[u8]) -> String {
use regex_syntax::is_meta_character;
use std::fmt::Write;
let mut s = String::with_capacity(bs.len());
for &b in bs {
if b <= 0x7F && !is_meta_character(b as char) {
write!(s, r"{}", b as char).unwrap();
} else {
write!(s, r"\x{:02x}", b).unwrap();
}
}
s
}
/// Converts arbitrary bytes to a nice string.
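///
/// Each byte is rendered with `std::ascii::escape_default`, so, for example,
/// `b"\xFFfoo"` becomes `\xfffoo`.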
pub fn show_bytes(bs: &[u8]) -> String {
use std::ascii::escape_default;
use std::str;
let mut nice = String::new();
for &b in bs {
let part: Vec<u8> = escape_default(b).collect();
nice.push_str(str::from_utf8(&part).unwrap());
}
nice
}

292
crates/regex/src/word.rs Normal file

@@ -0,0 +1,292 @@
use std::cell::RefCell;
use std::collections::HashMap;
use std::sync::Arc;
use bstr;
use grep_matcher::{Match, Matcher, NoError};
use regex::bytes::{CaptureLocations, Regex};
use thread_local::CachedThreadLocal;
use config::ConfiguredHIR;
use error::Error;
use matcher::RegexCaptures;
/// A matcher for implementing "word match" semantics.
#[derive(Debug)]
pub struct WordMatcher {
/// The regex which is roughly `(?:^|\W)(<original pattern>)(?:$|\W)`.
regex: Regex,
/// The original regex supplied by the user, which we use in a fast path
/// to try and detect matches before deferring to slower engines.
original: Regex,
/// A map from capture group name to capture group index.
names: HashMap<String, usize>,
/// A reusable buffer for finding the match location of the inner group.
locs: Arc<CachedThreadLocal<RefCell<CaptureLocations>>>,
}
impl Clone for WordMatcher {
fn clone(&self) -> WordMatcher {
// We implement Clone manually so that we get a fresh CachedThreadLocal
// such that it can set its own thread owner. This permits each thread
// using `locs` to hit the fast path.
WordMatcher {
regex: self.regex.clone(),
original: self.original.clone(),
names: self.names.clone(),
locs: Arc::new(CachedThreadLocal::new()),
}
}
}
impl WordMatcher {
/// Create a new matcher from the given pattern that only produces matches
/// that are considered "words."
///
/// The given options are used to construct the regular expression
/// internally.
pub fn new(expr: &ConfiguredHIR) -> Result<WordMatcher, Error> {
let original =
expr.with_pattern(|pat| format!("^(?:{})$", pat))?.regex()?;
let word_expr = expr.with_pattern(|pat| {
let pat = format!(r"(?:(?-m:^)|\W)({})(?:(?-m:$)|\W)", pat);
debug!("word regex: {:?}", pat);
pat
})?;
let regex = word_expr.regex()?;
let locs = Arc::new(CachedThreadLocal::new());
let mut names = HashMap::new();
for (i, optional_name) in regex.capture_names().enumerate() {
if let Some(name) = optional_name {
names.insert(name.to_string(), i.checked_sub(1).unwrap());
}
}
Ok(WordMatcher { regex, original, names, locs })
}
/// Return the underlying regex used by this matcher.
pub fn regex(&self) -> &Regex {
&self.regex
}
/// Attempt to do a fast confirmation of a word match that covers a subset
/// (but hopefully a big subset) of most cases. Ok(Some(..)) is returned
/// when a match is found. Ok(None) is returned when there is definitively
/// no match. Err(()) is returned when this routine could not detect
/// whether there was a match or not.
fn fast_find(
&self,
haystack: &[u8],
at: usize,
) -> Result<Option<Match>, ()> {
// This is a bit hairy. The whole point here is to avoid running an
// NFA simulation in the regex engine. Remember, our word regex looks
// like this:
//
// (^|\W)(<original regex>)($|\W)
// where ^ and $ have multiline mode DISABLED
//
// What we want are the match offsets of <original regex>. So in the
// easy/common case, the original regex will be sandwiched between
// two codepoints that are in the \W class. So our approach here is to
// look for a match of the overall word regexp, strip the \W ends and
// then check whether the original regex matches what's left. If so,
// then we are guaranteed a correct match.
//
// This only works though if we know that the match is sandwiched
// between two \W codepoints. This only occurs when neither ^ nor $
// match. This in turn only occurs when the match is at either the
// beginning or end of the haystack. In either of those cases, we
// declare defeat and defer to the slower implementation.
//
// The reason why we cannot handle the ^/$ cases here is because we
// can't assume anything about the original pattern. (Try commenting
// out the checks for ^/$ below and run the tests to see examples.)
let mut cand = match self.regex.find_at(haystack, at) {
None => return Ok(None),
Some(m) => Match::new(m.start(), m.end()),
};
if cand.start() == 0 || cand.end() == haystack.len() {
return Err(());
}
let (_, slen) = bstr::decode_utf8(&haystack[cand]);
let (_, elen) = bstr::decode_last_utf8(&haystack[cand]);
cand =
cand.with_start(cand.start() + slen).with_end(cand.end() - elen);
if self.original.is_match(&haystack[cand]) {
Ok(Some(cand))
} else {
Err(())
}
}
}
impl Matcher for WordMatcher {
type Captures = RegexCaptures;
type Error = NoError;
fn find_at(
&self,
haystack: &[u8],
at: usize,
) -> Result<Option<Match>, NoError> {
// To make this easy to get right, we extract captures here instead of
// calling `find_at`. The actual match is at capture group `1` instead
// of `0`. We *could* use `find_at` here and then trim the match after
// the fact, but that's a bit harder to get right, and it's not clear
// if it's worth it.
//
// OK, well, it turns out that it is worth it! But it is quite tricky.
// See `fast_find` for details. Effectively, this lets us skip running
// the NFA simulation in the regex engine in the vast majority of
// cases. However, the NFA simulation is required for full correctness.
match self.fast_find(haystack, at) {
Ok(Some(m)) => return Ok(Some(m)),
Ok(None) => return Ok(None),
Err(()) => {}
}
let cell =
self.locs.get_or(|| RefCell::new(self.regex.capture_locations()));
let mut caps = cell.borrow_mut();
self.regex.captures_read_at(&mut caps, haystack, at);
Ok(caps.get(1).map(|m| Match::new(m.0, m.1)))
}
fn new_captures(&self) -> Result<RegexCaptures, NoError> {
Ok(RegexCaptures::with_offset(self.regex.capture_locations(), 1))
}
fn capture_count(&self) -> usize {
self.regex.captures_len().checked_sub(1).unwrap()
}
fn capture_index(&self, name: &str) -> Option<usize> {
self.names.get(name).map(|i| *i)
}
fn captures_at(
&self,
haystack: &[u8],
at: usize,
caps: &mut RegexCaptures,
) -> Result<bool, NoError> {
let r =
self.regex.captures_read_at(caps.locations_mut(), haystack, at);
Ok(r.is_some())
}
// We specifically do not implement other methods like find_iter or
// captures_iter. Namely, the iter methods are guaranteed to be correct
// by virtue of implementing find_at and captures_at above.
}
#[cfg(test)]
mod tests {
use super::WordMatcher;
use config::Config;
use grep_matcher::{Captures, Match, Matcher};
fn matcher(pattern: &str) -> WordMatcher {
let chir = Config::default().hir(pattern).unwrap();
WordMatcher::new(&chir).unwrap()
}
fn find(pattern: &str, haystack: &str) -> Option<(usize, usize)> {
matcher(pattern)
.find(haystack.as_bytes())
.unwrap()
.map(|m| (m.start(), m.end()))
}
fn find_by_caps(pattern: &str, haystack: &str) -> Option<(usize, usize)> {
let m = matcher(pattern);
let mut caps = m.new_captures().unwrap();
if !m.captures(haystack.as_bytes(), &mut caps).unwrap() {
None
} else {
caps.get(0).map(|m| (m.start(), m.end()))
}
}
// Test that the standard `find` API reports offsets correctly.
#[test]
fn various_find() {
assert_eq!(Some((0, 3)), find(r"foo", "foo"));
assert_eq!(Some((0, 3)), find(r"foo", "foo("));
assert_eq!(Some((1, 4)), find(r"foo", "!foo("));
assert_eq!(None, find(r"foo", "!afoo("));
assert_eq!(Some((0, 3)), find(r"foo", "foo☃"));
assert_eq!(None, find(r"foo", "fooб"));
assert_eq!(Some((0, 4)), find(r"foo5", "foo5"));
assert_eq!(None, find(r"foo", "foo5"));
assert_eq!(Some((1, 4)), find(r"foo", "!foo!"));
assert_eq!(Some((1, 5)), find(r"foo!", "!foo!"));
assert_eq!(Some((0, 5)), find(r"!foo!", "!foo!"));
assert_eq!(Some((0, 3)), find(r"foo", "foo\n"));
assert_eq!(Some((1, 4)), find(r"foo", "!foo!\n"));
assert_eq!(Some((1, 5)), find(r"foo!", "!foo!\n"));
assert_eq!(Some((0, 5)), find(r"!foo!", "!foo!\n"));
assert_eq!(Some((1, 6)), find(r"!?foo!?", "!!foo!!"));
assert_eq!(Some((0, 5)), find(r"!?foo!?", "!foo!"));
assert_eq!(Some((2, 5)), find(r"!?foo!?", "a!foo!a"));
assert_eq!(Some((2, 7)), find(r"!?foo!?", "##!foo!\n"));
assert_eq!(Some((3, 7)), find(r"f?oo!?", "##\nfoo!##"));
assert_eq!(Some((2, 5)), find(r"(?-u)foo[^a]*", "#!foo☃aaa"));
}
// See: https://github.com/BurntSushi/ripgrep/issues/389
#[test]
fn regression_dash() {
assert_eq!(Some((0, 2)), find(r"-2", "-2"));
}
// Test that the captures API also reports offsets correctly, just as
// find does. This exercises a different path in the code since captures
// are handled differently.
#[test]
fn various_captures() {
assert_eq!(Some((0, 3)), find_by_caps(r"foo", "foo"));
assert_eq!(Some((0, 3)), find_by_caps(r"foo", "foo("));
assert_eq!(Some((1, 4)), find_by_caps(r"foo", "!foo("));
assert_eq!(None, find_by_caps(r"foo", "!afoo("));
assert_eq!(Some((0, 3)), find_by_caps(r"foo", "foo☃"));
assert_eq!(None, find_by_caps(r"foo", "fooб"));
// assert_eq!(Some((0, 3)), find_by_caps(r"foo", "fooб"));
// See: https://github.com/BurntSushi/ripgrep/issues/389
assert_eq!(Some((0, 2)), find_by_caps(r"-2", "-2"));
}
// Test that the capture reporting methods work as advertised.
#[test]
fn capture_indexing() {
let m = matcher(r"(a)(?P<foo>b)(c)");
assert_eq!(4, m.capture_count());
assert_eq!(Some(2), m.capture_index("foo"));
let mut caps = m.new_captures().unwrap();
assert_eq!(4, caps.len());
assert!(m.captures(b"abc", &mut caps).unwrap());
assert_eq!(caps.get(0), Some(Match::new(0, 3)));
assert_eq!(caps.get(1), Some(Match::new(0, 1)));
assert_eq!(caps.get(2), Some(Match::new(1, 2)));
assert_eq!(caps.get(3), Some(Match::new(2, 3)));
assert_eq!(caps.get(4), None);
assert!(m.captures(b"#abc#", &mut caps).unwrap());
assert_eq!(caps.get(0), Some(Match::new(1, 4)));
assert_eq!(caps.get(1), Some(Match::new(1, 2)));
assert_eq!(caps.get(2), Some(Match::new(2, 3)));
assert_eq!(caps.get(3), Some(Match::new(3, 4)));
assert_eq!(caps.get(4), None);
}
}

33
crates/searcher/Cargo.toml Normal file

@@ -0,0 +1,33 @@
[package]
name = "grep-searcher"
version = "0.1.6" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
Fast line oriented regex searching as a library.
"""
documentation = "https://docs.rs/grep-searcher"
homepage = "https://github.com/BurntSushi/ripgrep"
repository = "https://github.com/BurntSushi/ripgrep"
readme = "README.md"
keywords = ["regex", "grep", "egrep", "search", "pattern"]
license = "Unlicense/MIT"
[dependencies]
bstr = { version = "0.2.0", default-features = false, features = ["std"] }
bytecount = "0.6"
encoding_rs = "0.8.14"
encoding_rs_io = "0.1.6"
grep-matcher = { version = "0.1.2", path = "../grep-matcher" }
log = "0.4.5"
memmap = "0.7"
[dev-dependencies]
grep-regex = { version = "0.1.3", path = "../grep-regex" }
regex = "1.1"
[features]
default = ["bytecount/runtime-dispatch-simd"]
simd-accel = ["encoding_rs/simd-accel"]
# This feature is DEPRECATED. Runtime dispatch is used for SIMD now.
avx-accel = []

21
crates/searcher/LICENSE-MIT Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Andrew Gallant
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

Some files were not shown because too many files have changed in this diff