Mirror of https://github.com/BurntSushi/ripgrep.git (synced 2025-07-31 04:02:00 -07:00)
repo: move all source code in crates directory
The top-level listing was just getting a bit too long for my taste. So put all of the code in one directory and shrink the large top-level mess to a small top-level mess.

NOTE: This commit only contains renames. The subsequent commit will actually make ripgrep build again. We do it this way with the naive hope that this will make it easier for git history to track the renames. Sigh.
crates/globset/COPYING (new file)
@@ -0,0 +1,3 @@
This project is dual-licensed under the Unlicense and MIT licenses.

You may use this code under the terms of either license.
crates/globset/Cargo.toml (new file)
@@ -0,0 +1,33 @@
[package]
name = "globset"
version = "0.4.4"  #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
Cross platform single glob and glob set matching. Glob set matching is the
process of matching one or more glob patterns against a single candidate path
simultaneously, and returning all of the globs that matched.
"""
documentation = "https://docs.rs/globset"
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/globset"
repository = "https://github.com/BurntSushi/ripgrep/tree/master/globset"
readme = "README.md"
keywords = ["regex", "glob", "multiple", "set", "pattern"]
license = "Unlicense/MIT"

[lib]
name = "globset"
bench = false

[dependencies]
aho-corasick = "0.7.3"
bstr = { version = "0.2.0", default-features = false, features = ["std"] }
fnv = "1.0.6"
log = "0.4.5"
regex = "1.1.5"

[dev-dependencies]
glob = "0.3.0"
lazy_static = "1"

[features]
simd-accel = []
crates/globset/LICENSE-MIT (new file)
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 Andrew Gallant

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
crates/globset/README.md (new file)
@@ -0,0 +1,122 @@
globset
=======
Cross platform single glob and glob set matching. Glob set matching is the
process of matching one or more glob patterns against a single candidate path
simultaneously, and returning all of the globs that matched.

[Linux build status](https://travis-ci.org/BurntSushi/ripgrep)
[Windows build status](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[crates.io](https://crates.io/crates/globset)

Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).

### Documentation

[https://docs.rs/globset](https://docs.rs/globset)

### Usage

Add this to your `Cargo.toml`:

```toml
[dependencies]
globset = "0.4"
```

and this to your crate root:

```rust
extern crate globset;
```

### Example: one glob

This example shows how to match a single glob against a single file path.

```rust
use globset::Glob;

let glob = Glob::new("*.rs")?.compile_matcher();

assert!(glob.is_match("foo.rs"));
assert!(glob.is_match("foo/bar.rs"));
assert!(!glob.is_match("Cargo.toml"));
```

### Example: configuring a glob matcher

This example shows how to use a `GlobBuilder` to configure aspects of match
semantics. In this example, we prevent wildcards from matching path separators.

```rust
use globset::GlobBuilder;

let glob = GlobBuilder::new("*.rs")
    .literal_separator(true).build()?.compile_matcher();

assert!(glob.is_match("foo.rs"));
assert!(!glob.is_match("foo/bar.rs")); // no longer matches
assert!(!glob.is_match("Cargo.toml"));
```

### Example: match multiple globs at once

This example shows how to match multiple glob patterns at once.

```rust
use globset::{Glob, GlobSetBuilder};

let mut builder = GlobSetBuilder::new();
// A GlobBuilder can be used to configure each glob's match semantics
// independently.
builder.add(Glob::new("*.rs")?);
builder.add(Glob::new("src/lib.rs")?);
builder.add(Glob::new("src/**/foo.rs")?);
let set = builder.build()?;

assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]);
```

### Performance

This crate implements globs by converting them to regular expressions, and
executing them with the
[`regex`](https://github.com/rust-lang-nursery/regex)
crate.

For single glob matching, performance of this crate should be roughly on par
with the performance of the
[`glob`](https://github.com/rust-lang-nursery/glob)
crate. (`*_regex` benchmarks below correspond to this library, while `*_glob`
benchmarks correspond to the `glob` library.)
Optimizations in the `regex` crate may propel this library past `glob`,
particularly when matching longer paths.

```
test ext_glob    ... bench:   425 ns/iter (+/- 21)
test ext_regex   ... bench:   175 ns/iter (+/- 10)
test long_glob   ... bench:   182 ns/iter (+/- 11)
test long_regex  ... bench:   173 ns/iter (+/- 10)
test short_glob  ... bench:    69 ns/iter (+/- 4)
test short_regex ... bench:    83 ns/iter (+/- 2)
```

The primary performance advantage of this crate is when matching multiple
globs against a single path. With the `glob` crate, one must match each glob
against the path serially, one after the other. In this crate, many can be
matched simultaneously. For example:

```
test many_short_glob      ... bench:   1,063 ns/iter (+/- 47)
test many_short_regex_set ... bench:     186 ns/iter (+/- 11)
```

### Comparison with the [`glob`](https://github.com/rust-lang-nursery/glob) crate

* Supports alternate "or" globs, e.g., `*.{foo,bar}` (see the example below).
* Can match non-UTF-8 file paths correctly.
* Supports matching multiple globs at once.
* Doesn't provide a recursive directory iterator of matching file paths,
  although I believe this crate should grow one eventually.
* Supports case insensitive and require-literal-separator match options, but
  **doesn't** support the require-literal-leading-dot option.
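
For example, the alternate "or" syntax from the list above (a brief sketch in
the style of the examples higher up):

```rust
use globset::Glob;

let m = Glob::new("*.{png,jpg}")?.compile_matcher();

assert!(m.is_match("photo.png"));
assert!(m.is_match("photo.jpg"));
assert!(!m.is_match("photo.gif"));
```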
crates/globset/UNLICENSE (new file)
@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.

In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org/>
crates/globset/benches/bench.rs (new file)
@@ -0,0 +1,116 @@
|
||||
/*!
|
||||
This module benchmarks the glob implementation. For benchmarks on the ripgrep
|
||||
tool itself, see the benchsuite directory.
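
(A note on running them: because of the `test` feature below, these benchmarks
need a nightly toolchain, e.g. `cargo +nightly bench` from this crate's
directory.)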
|
||||
*/
|
||||
#![feature(test)]
|
||||
|
||||
extern crate glob;
|
||||
extern crate globset;
|
||||
extern crate regex;
|
||||
extern crate test;
|
||||
|
||||
use globset::{Candidate, Glob, GlobMatcher, GlobSet, GlobSetBuilder};
|
||||
|
||||
const EXT: &'static str = "some/a/bigger/path/to/the/crazy/needle.txt";
|
||||
const EXT_PAT: &'static str = "*.txt";
|
||||
|
||||
const SHORT: &'static str = "some/needle.txt";
|
||||
const SHORT_PAT: &'static str = "some/**/needle.txt";
|
||||
|
||||
const LONG: &'static str = "some/a/bigger/path/to/the/crazy/needle.txt";
|
||||
const LONG_PAT: &'static str = "some/**/needle.txt";
|
||||
|
||||
fn new_glob(pat: &str) -> glob::Pattern {
|
||||
glob::Pattern::new(pat).unwrap()
|
||||
}
|
||||
|
||||
fn new_reglob(pat: &str) -> GlobMatcher {
|
||||
Glob::new(pat).unwrap().compile_matcher()
|
||||
}
|
||||
|
||||
fn new_reglob_many(pats: &[&str]) -> GlobSet {
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
for pat in pats {
|
||||
builder.add(Glob::new(pat).unwrap());
|
||||
}
|
||||
builder.build().unwrap()
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn ext_glob(b: &mut test::Bencher) {
|
||||
let pat = new_glob(EXT_PAT);
|
||||
b.iter(|| assert!(pat.matches(EXT)));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn ext_regex(b: &mut test::Bencher) {
|
||||
let set = new_reglob(EXT_PAT);
|
||||
let cand = Candidate::new(EXT);
|
||||
b.iter(|| assert!(set.is_match_candidate(&cand)));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn short_glob(b: &mut test::Bencher) {
|
||||
let pat = new_glob(SHORT_PAT);
|
||||
b.iter(|| assert!(pat.matches(SHORT)));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn short_regex(b: &mut test::Bencher) {
|
||||
let set = new_reglob(SHORT_PAT);
|
||||
let cand = Candidate::new(SHORT);
|
||||
b.iter(|| assert!(set.is_match_candidate(&cand)));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn long_glob(b: &mut test::Bencher) {
|
||||
let pat = new_glob(LONG_PAT);
|
||||
b.iter(|| assert!(pat.matches(LONG)));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn long_regex(b: &mut test::Bencher) {
|
||||
let set = new_reglob(LONG_PAT);
|
||||
let cand = Candidate::new(LONG);
|
||||
b.iter(|| assert!(set.is_match_candidate(&cand)));
|
||||
}
|
||||
|
||||
const MANY_SHORT_GLOBS: &'static [&'static str] = &[
|
||||
// Taken from a random .gitignore on my system.
|
||||
".*.swp",
|
||||
"tags",
|
||||
"target",
|
||||
"*.lock",
|
||||
"tmp",
|
||||
"*.csv",
|
||||
"*.fst",
|
||||
"*-got",
|
||||
"*.csv.idx",
|
||||
"words",
|
||||
"98m*",
|
||||
"dict",
|
||||
"test",
|
||||
"months",
|
||||
];
|
||||
|
||||
const MANY_SHORT_SEARCH: &'static str = "98m-blah.csv.idx";
|
||||
|
||||
#[bench]
|
||||
fn many_short_glob(b: &mut test::Bencher) {
|
||||
let pats: Vec<_> = MANY_SHORT_GLOBS.iter().map(|&s| new_glob(s)).collect();
|
||||
b.iter(|| {
|
||||
let mut count = 0;
|
||||
for pat in &pats {
|
||||
if pat.matches(MANY_SHORT_SEARCH) {
|
||||
count += 1;
|
||||
}
|
||||
}
|
||||
assert_eq!(2, count);
|
||||
})
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn many_short_regex_set(b: &mut test::Bencher) {
|
||||
let set = new_reglob_many(MANY_SHORT_GLOBS);
|
||||
b.iter(|| assert_eq!(2, set.matches(MANY_SHORT_SEARCH).iter().count()));
|
||||
}
|
crates/globset/src/glob.rs (new file, 1521 lines)
File diff suppressed because it is too large.
crates/globset/src/lib.rs (new file)
@@ -0,0 +1,860 @@
|
||||
/*!
|
||||
The globset crate provides cross platform single glob and glob set matching.
|
||||
|
||||
Glob set matching is the process of matching one or more glob patterns against
|
||||
a single candidate path simultaneously, and returning all of the globs that
|
||||
matched. For example, given this set of globs:
|
||||
|
||||
```ignore
|
||||
*.rs
|
||||
src/lib.rs
|
||||
src/**/foo.rs
|
||||
```
|
||||
|
||||
and a path `src/bar/baz/foo.rs`, then the set would report the first and third
|
||||
globs as matching.
|
||||
|
||||
# Example: one glob
|
||||
|
||||
This example shows how to match a single glob against a single file path.
|
||||
|
||||
```
|
||||
# fn example() -> Result<(), globset::Error> {
|
||||
use globset::Glob;
|
||||
|
||||
let glob = Glob::new("*.rs")?.compile_matcher();
|
||||
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(glob.is_match("foo/bar.rs"));
|
||||
assert!(!glob.is_match("Cargo.toml"));
|
||||
# Ok(()) } example().unwrap();
|
||||
```
|
||||
|
||||
# Example: configuring a glob matcher
|
||||
|
||||
This example shows how to use a `GlobBuilder` to configure aspects of match
|
||||
semantics. In this example, we prevent wildcards from matching path separators.
|
||||
|
||||
```
|
||||
# fn example() -> Result<(), globset::Error> {
|
||||
use globset::GlobBuilder;
|
||||
|
||||
let glob = GlobBuilder::new("*.rs")
|
||||
.literal_separator(true).build()?.compile_matcher();
|
||||
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(!glob.is_match("foo/bar.rs")); // no longer matches
|
||||
assert!(!glob.is_match("Cargo.toml"));
|
||||
# Ok(()) } example().unwrap();
|
||||
```
|
||||
|
||||
# Example: match multiple globs at once
|
||||
|
||||
This example shows how to match multiple glob patterns at once.
|
||||
|
||||
```
|
||||
# fn example() -> Result<(), globset::Error> {
|
||||
use globset::{Glob, GlobSetBuilder};
|
||||
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
// A GlobBuilder can be used to configure each glob's match semantics
|
||||
// independently.
|
||||
builder.add(Glob::new("*.rs")?);
|
||||
builder.add(Glob::new("src/lib.rs")?);
|
||||
builder.add(Glob::new("src/**/foo.rs")?);
|
||||
let set = builder.build()?;
|
||||
|
||||
assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]);
|
||||
# Ok(()) } example().unwrap();
|
||||
```
|
||||
|
||||
# Syntax
|
||||
|
||||
Standard Unix-style glob syntax is supported:
|
||||
|
||||
* `?` matches any single character. (If the `literal_separator` option is
|
||||
enabled, then `?` can never match a path separator.)
|
||||
* `*` matches zero or more characters. (If the `literal_separator` option is
|
||||
enabled, then `*` can never match a path separator.)
|
||||
* `**` recursively matches directories, but is only legal in three situations.
  First, if the glob starts with <code>\*\*/</code>, then it matches
  all directories. For example, <code>\*\*/foo</code> matches `foo`
  and `bar/foo` but not `foo/bar`. Secondly, if the glob ends with
  <code>/\*\*</code>, then it matches all sub-entries. For example,
  <code>foo/\*\*</code> matches `foo/a` and `foo/a/b`, but not `foo`.
  Thirdly, if the glob contains <code>/\*\*/</code> anywhere within
  the pattern, then it matches zero or more directories. Using `**` anywhere
  else is illegal (N.B. the glob `**` is allowed and means "match
  everything"). See the example after this list.
|
||||
* `{a,b}` matches `a` or `b` where `a` and `b` are arbitrary glob patterns.
|
||||
(N.B. Nesting `{...}` is not currently allowed.)
|
||||
* `[ab]` matches `a` or `b` where `a` and `b` are characters. Use
|
||||
`[!ab]` to match any character except for `a` and `b`.
|
||||
* Metacharacters such as `*` and `?` can be escaped with character class
|
||||
notation. e.g., `[*]` matches `*`.
|
||||
* When backslash escapes are enabled, a backslash (`\`) will escape all meta
  characters in a glob. If it precedes a non-meta character, then the
  backslash is ignored. A `\\` will match a literal `\\`. Note that this mode
  is only enabled on Unix platforms by default, but can be enabled on any
  platform via the `backslash_escape` setting on `Glob`.
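
For example, a brief sketch of the `**` rules above:

```
# fn example() -> Result<(), globset::Error> {
use globset::Glob;

let m = Glob::new("**/foo")?.compile_matcher();
assert!(m.is_match("foo"));
assert!(m.is_match("bar/foo"));
assert!(!m.is_match("foo/bar"));
# Ok(()) } example().unwrap();
```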
|
||||
|
||||
A `GlobBuilder` can be used to prevent wildcards from matching path separators,
|
||||
or to enable case insensitive matching.
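
For example, a brief sketch of case insensitive matching (this assumes the
`case_insensitive` option on `GlobBuilder`, which is defined in `glob.rs`):

```
# fn example() -> Result<(), globset::Error> {
use globset::GlobBuilder;

let glob = GlobBuilder::new("*.rs")
    .case_insensitive(true)
    .build()?
    .compile_matcher();

assert!(glob.is_match("FOO.rs"));
# Ok(()) } example().unwrap();
```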
|
||||
*/
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
extern crate aho_corasick;
|
||||
extern crate bstr;
|
||||
extern crate fnv;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate regex;
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::error::Error as StdError;
|
||||
use std::fmt;
|
||||
use std::hash;
|
||||
use std::path::Path;
|
||||
use std::str;
|
||||
|
||||
use aho_corasick::AhoCorasick;
|
||||
use bstr::{ByteSlice, ByteVec, B};
|
||||
use regex::bytes::{Regex, RegexBuilder, RegexSet};
|
||||
|
||||
use glob::MatchStrategy;
|
||||
pub use glob::{Glob, GlobBuilder, GlobMatcher};
|
||||
use pathutil::{file_name, file_name_ext, normalize_path};
|
||||
|
||||
mod glob;
|
||||
mod pathutil;
|
||||
|
||||
/// Represents an error that can occur when parsing a glob pattern.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct Error {
|
||||
/// The original glob provided by the caller.
|
||||
glob: Option<String>,
|
||||
/// The kind of error.
|
||||
kind: ErrorKind,
|
||||
}
|
||||
|
||||
/// The kind of error that can occur when parsing a glob pattern.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum ErrorKind {
|
||||
/// **DEPRECATED**.
|
||||
///
|
||||
/// This error used to occur for consistency with git's glob specification,
|
||||
/// but the specification now accepts all uses of `**`. When `**` does not
|
||||
/// appear adjacent to a path separator or at the beginning/end of a glob,
|
||||
/// it is now treated as two consecutive `*` patterns. As such, this error
|
||||
/// is no longer used.
|
||||
InvalidRecursive,
|
||||
/// Occurs when a character class (e.g., `[abc]`) is not closed.
|
||||
UnclosedClass,
|
||||
/// Occurs when a range in a character (e.g., `[a-z]`) is invalid. For
|
||||
/// example, if the range starts with a lexicographically larger character
|
||||
/// than it ends with.
|
||||
InvalidRange(char, char),
|
||||
/// Occurs when a `}` is found without a matching `{`.
|
||||
UnopenedAlternates,
|
||||
/// Occurs when a `{` is found without a matching `}`.
|
||||
UnclosedAlternates,
|
||||
/// Occurs when an alternating group is nested inside another alternating
|
||||
/// group, e.g., `{{a,b},{c,d}}`.
|
||||
NestedAlternates,
|
||||
/// Occurs when an unescaped '\' is found at the end of a glob.
|
||||
DanglingEscape,
|
||||
/// An error associated with parsing or compiling a regex.
|
||||
Regex(String),
|
||||
/// Hints that destructuring should not be exhaustive.
|
||||
///
|
||||
/// This enum may grow additional variants, so this makes sure clients
|
||||
/// don't count on exhaustive matching. (Otherwise, adding a new variant
|
||||
/// could break existing code.)
|
||||
#[doc(hidden)]
|
||||
__Nonexhaustive,
|
||||
}
|
||||
|
||||
impl StdError for Error {
|
||||
fn description(&self) -> &str {
|
||||
self.kind.description()
|
||||
}
|
||||
}
|
||||
|
||||
impl Error {
|
||||
/// Return the glob that caused this error, if one exists.
|
||||
pub fn glob(&self) -> Option<&str> {
|
||||
self.glob.as_ref().map(|s| &**s)
|
||||
}
|
||||
|
||||
/// Return the kind of this error.
|
||||
pub fn kind(&self) -> &ErrorKind {
|
||||
&self.kind
|
||||
}
|
||||
}
|
||||
|
||||
impl ErrorKind {
|
||||
fn description(&self) -> &str {
|
||||
match *self {
|
||||
ErrorKind::InvalidRecursive => {
|
||||
"invalid use of **; must be one path component"
|
||||
}
|
||||
ErrorKind::UnclosedClass => {
|
||||
"unclosed character class; missing ']'"
|
||||
}
|
||||
ErrorKind::InvalidRange(_, _) => "invalid character range",
|
||||
ErrorKind::UnopenedAlternates => {
|
||||
"unopened alternate group; missing '{' \
|
||||
(maybe escape '}' with '[}]'?)"
|
||||
}
|
||||
ErrorKind::UnclosedAlternates => {
|
||||
"unclosed alternate group; missing '}' \
|
||||
(maybe escape '{' with '[{]'?)"
|
||||
}
|
||||
ErrorKind::NestedAlternates => {
|
||||
"nested alternate groups are not allowed"
|
||||
}
|
||||
ErrorKind::DanglingEscape => "dangling '\\'",
|
||||
ErrorKind::Regex(ref err) => err,
|
||||
ErrorKind::__Nonexhaustive => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self.glob {
|
||||
None => self.kind.fmt(f),
|
||||
Some(ref glob) => {
|
||||
write!(f, "error parsing glob '{}': {}", glob, self.kind)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ErrorKind {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match *self {
|
||||
ErrorKind::InvalidRecursive
|
||||
| ErrorKind::UnclosedClass
|
||||
| ErrorKind::UnopenedAlternates
|
||||
| ErrorKind::UnclosedAlternates
|
||||
| ErrorKind::NestedAlternates
|
||||
| ErrorKind::DanglingEscape
|
||||
| ErrorKind::Regex(_) => write!(f, "{}", self.description()),
|
||||
ErrorKind::InvalidRange(s, e) => {
|
||||
write!(f, "invalid range; '{}' > '{}'", s, e)
|
||||
}
|
||||
ErrorKind::__Nonexhaustive => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn new_regex(pat: &str) -> Result<Regex, Error> {
|
||||
RegexBuilder::new(pat)
|
||||
.dot_matches_new_line(true)
|
||||
.size_limit(10 * (1 << 20))
|
||||
.dfa_size_limit(10 * (1 << 20))
|
||||
.build()
|
||||
.map_err(|err| Error {
|
||||
glob: Some(pat.to_string()),
|
||||
kind: ErrorKind::Regex(err.to_string()),
|
||||
})
|
||||
}
|
||||
|
||||
fn new_regex_set<I, S>(pats: I) -> Result<RegexSet, Error>
|
||||
where
|
||||
S: AsRef<str>,
|
||||
I: IntoIterator<Item = S>,
|
||||
{
|
||||
RegexSet::new(pats).map_err(|err| Error {
|
||||
glob: None,
|
||||
kind: ErrorKind::Regex(err.to_string()),
|
||||
})
|
||||
}
|
||||
|
||||
type Fnv = hash::BuildHasherDefault<fnv::FnvHasher>;
|
||||
|
||||
/// GlobSet represents a group of globs that can be matched together in a
|
||||
/// single pass.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct GlobSet {
|
||||
len: usize,
|
||||
strats: Vec<GlobSetMatchStrategy>,
|
||||
}
|
||||
|
||||
impl GlobSet {
|
||||
/// Create an empty `GlobSet`. An empty set matches nothing.
|
||||
#[inline]
|
||||
pub fn empty() -> GlobSet {
|
||||
GlobSet { len: 0, strats: vec![] }
|
||||
}
|
||||
|
||||
/// Returns true if this set is empty, and therefore matches nothing.
|
||||
#[inline]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len == 0
|
||||
}
|
||||
|
||||
/// Returns the number of globs in this set.
|
||||
#[inline]
|
||||
pub fn len(&self) -> usize {
|
||||
self.len
|
||||
}
|
||||
|
||||
/// Returns true if any glob in this set matches the path given.
|
||||
pub fn is_match<P: AsRef<Path>>(&self, path: P) -> bool {
|
||||
self.is_match_candidate(&Candidate::new(path.as_ref()))
|
||||
}
|
||||
|
||||
/// Returns true if any glob in this set matches the path given.
|
||||
///
|
||||
/// This takes a Candidate as input, which can be used to amortize the
|
||||
/// cost of preparing a path for matching.
|
||||
pub fn is_match_candidate(&self, path: &Candidate) -> bool {
|
||||
if self.is_empty() {
|
||||
return false;
|
||||
}
|
||||
for strat in &self.strats {
|
||||
if strat.is_match(path) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
/// Returns the sequence number of every glob pattern that matches the
|
||||
/// given path.
|
||||
pub fn matches<P: AsRef<Path>>(&self, path: P) -> Vec<usize> {
|
||||
self.matches_candidate(&Candidate::new(path.as_ref()))
|
||||
}
|
||||
|
||||
/// Returns the sequence number of every glob pattern that matches the
|
||||
/// given path.
|
||||
///
|
||||
/// This takes a Candidate as input, which can be used to amortize the
|
||||
/// cost of preparing a path for matching.
|
||||
pub fn matches_candidate(&self, path: &Candidate) -> Vec<usize> {
|
||||
let mut into = vec![];
|
||||
if self.is_empty() {
|
||||
return into;
|
||||
}
|
||||
self.matches_candidate_into(path, &mut into);
|
||||
into
|
||||
}
|
||||
|
||||
/// Adds the sequence number of every glob pattern that matches the given
|
||||
/// path to the vec given.
|
||||
///
|
||||
/// `into` is cleared before matching begins, and contains the set of
|
||||
/// sequence numbers (in ascending order) after matching ends. If no globs
|
||||
/// were matched, then `into` will be empty.
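///
/// For example, a brief sketch of reusing the buffer across calls (assuming
/// a `set` built with `GlobSetBuilder`):
///
/// ```ignore
/// let mut hits = vec![];
/// set.matches_into("src/lib.rs", &mut hits);
/// // `hits` now holds the indices of the matching globs; the same buffer
/// // can be reused for the next path.
/// set.matches_into("src/main.rs", &mut hits);
/// ```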
|
||||
pub fn matches_into<P: AsRef<Path>>(
|
||||
&self,
|
||||
path: P,
|
||||
into: &mut Vec<usize>,
|
||||
) {
|
||||
self.matches_candidate_into(&Candidate::new(path.as_ref()), into);
|
||||
}
|
||||
|
||||
/// Adds the sequence number of every glob pattern that matches the given
|
||||
/// path to the vec given.
|
||||
///
|
||||
/// `into` is cleared before matching begins, and contains the set of
|
||||
/// sequence numbers (in ascending order) after matching ends. If no globs
|
||||
/// were matched, then `into` will be empty.
|
||||
///
|
||||
/// This takes a Candidate as input, which can be used to amortize the
|
||||
/// cost of preparing a path for matching.
|
||||
pub fn matches_candidate_into(
|
||||
&self,
|
||||
path: &Candidate,
|
||||
into: &mut Vec<usize>,
|
||||
) {
|
||||
into.clear();
|
||||
if self.is_empty() {
|
||||
return;
|
||||
}
|
||||
for strat in &self.strats {
|
||||
strat.matches_into(path, into);
|
||||
}
|
||||
into.sort();
|
||||
into.dedup();
|
||||
}
|
||||
|
||||
fn new(pats: &[Glob]) -> Result<GlobSet, Error> {
|
||||
if pats.is_empty() {
|
||||
return Ok(GlobSet { len: 0, strats: vec![] });
|
||||
}
|
||||
let mut lits = LiteralStrategy::new();
|
||||
let mut base_lits = BasenameLiteralStrategy::new();
|
||||
let mut exts = ExtensionStrategy::new();
|
||||
let mut prefixes = MultiStrategyBuilder::new();
|
||||
let mut suffixes = MultiStrategyBuilder::new();
|
||||
let mut required_exts = RequiredExtensionStrategyBuilder::new();
|
||||
let mut regexes = MultiStrategyBuilder::new();
|
||||
for (i, p) in pats.iter().enumerate() {
|
||||
match MatchStrategy::new(p) {
|
||||
MatchStrategy::Literal(lit) => {
|
||||
lits.add(i, lit);
|
||||
}
|
||||
MatchStrategy::BasenameLiteral(lit) => {
|
||||
base_lits.add(i, lit);
|
||||
}
|
||||
MatchStrategy::Extension(ext) => {
|
||||
exts.add(i, ext);
|
||||
}
|
||||
MatchStrategy::Prefix(prefix) => {
|
||||
prefixes.add(i, prefix);
|
||||
}
|
||||
MatchStrategy::Suffix { suffix, component } => {
|
||||
if component {
|
||||
lits.add(i, suffix[1..].to_string());
|
||||
}
|
||||
suffixes.add(i, suffix);
|
||||
}
|
||||
MatchStrategy::RequiredExtension(ext) => {
|
||||
required_exts.add(i, ext, p.regex().to_owned());
|
||||
}
|
||||
MatchStrategy::Regex => {
|
||||
debug!("glob converted to regex: {:?}", p);
|
||||
regexes.add(i, p.regex().to_owned());
|
||||
}
|
||||
}
|
||||
}
|
||||
debug!(
|
||||
"built glob set; {} literals, {} basenames, {} extensions, \
|
||||
{} prefixes, {} suffixes, {} required extensions, {} regexes",
|
||||
lits.0.len(),
|
||||
base_lits.0.len(),
|
||||
exts.0.len(),
|
||||
prefixes.literals.len(),
|
||||
suffixes.literals.len(),
|
||||
required_exts.0.len(),
|
||||
regexes.literals.len()
|
||||
);
|
||||
Ok(GlobSet {
|
||||
len: pats.len(),
|
||||
strats: vec![
|
||||
GlobSetMatchStrategy::Extension(exts),
|
||||
GlobSetMatchStrategy::BasenameLiteral(base_lits),
|
||||
GlobSetMatchStrategy::Literal(lits),
|
||||
GlobSetMatchStrategy::Suffix(suffixes.suffix()),
|
||||
GlobSetMatchStrategy::Prefix(prefixes.prefix()),
|
||||
GlobSetMatchStrategy::RequiredExtension(
|
||||
required_exts.build()?,
|
||||
),
|
||||
GlobSetMatchStrategy::Regex(regexes.regex_set()?),
|
||||
],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// GlobSetBuilder builds a group of patterns that can be used to
|
||||
/// simultaneously match a file path.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct GlobSetBuilder {
|
||||
pats: Vec<Glob>,
|
||||
}
|
||||
|
||||
impl GlobSetBuilder {
|
||||
/// Create a new GlobSetBuilder. A GlobSetBuilder can be used to add new
|
||||
/// patterns. Once all patterns have been added, `build` should be called
|
||||
/// to produce a `GlobSet`, which can then be used for matching.
|
||||
pub fn new() -> GlobSetBuilder {
|
||||
GlobSetBuilder { pats: vec![] }
|
||||
}
|
||||
|
||||
/// Builds a new matcher from all of the glob patterns added so far.
|
||||
///
|
||||
/// Once a matcher is built, no new patterns can be added to it.
|
||||
pub fn build(&self) -> Result<GlobSet, Error> {
|
||||
GlobSet::new(&self.pats)
|
||||
}
|
||||
|
||||
/// Add a new pattern to this set.
|
||||
pub fn add(&mut self, pat: Glob) -> &mut GlobSetBuilder {
|
||||
self.pats.push(pat);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// A candidate path for matching.
|
||||
///
|
||||
/// All glob matching in this crate operates on `Candidate` values.
|
||||
/// Constructing candidates has a very small cost associated with it, so
|
||||
/// callers may find it beneficial to amortize that cost when matching a single
|
||||
/// path against multiple globs or sets of globs.
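///
/// For example, a brief sketch of amortizing that cost with the `GlobSet`
/// APIs above:
///
/// ```ignore
/// use globset::{Candidate, Glob, GlobSetBuilder};
///
/// let mut builder = GlobSetBuilder::new();
/// builder.add(Glob::new("*.rs").unwrap());
/// builder.add(Glob::new("src/**/*.rs").unwrap());
/// let set = builder.build().unwrap();
///
/// // Build the candidate once, then match it against the set repeatedly.
/// let cand = Candidate::new("src/lib.rs");
/// assert!(set.is_match_candidate(&cand));
/// assert_eq!(set.matches_candidate(&cand), vec![0, 1]);
/// ```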
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Candidate<'a> {
|
||||
path: Cow<'a, [u8]>,
|
||||
basename: Cow<'a, [u8]>,
|
||||
ext: Cow<'a, [u8]>,
|
||||
}
|
||||
|
||||
impl<'a> Candidate<'a> {
|
||||
/// Create a new candidate for matching from the given path.
|
||||
pub fn new<P: AsRef<Path> + ?Sized>(path: &'a P) -> Candidate<'a> {
|
||||
let path = normalize_path(Vec::from_path_lossy(path.as_ref()));
|
||||
let basename = file_name(&path).unwrap_or(Cow::Borrowed(B("")));
|
||||
let ext = file_name_ext(&basename).unwrap_or(Cow::Borrowed(B("")));
|
||||
Candidate { path: path, basename: basename, ext: ext }
|
||||
}
|
||||
|
||||
fn path_prefix(&self, max: usize) -> &[u8] {
|
||||
if self.path.len() <= max {
|
||||
&*self.path
|
||||
} else {
|
||||
&self.path[..max]
|
||||
}
|
||||
}
|
||||
|
||||
fn path_suffix(&self, max: usize) -> &[u8] {
|
||||
if self.path.len() <= max {
|
||||
&*self.path
|
||||
} else {
|
||||
&self.path[self.path.len() - max..]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
enum GlobSetMatchStrategy {
|
||||
Literal(LiteralStrategy),
|
||||
BasenameLiteral(BasenameLiteralStrategy),
|
||||
Extension(ExtensionStrategy),
|
||||
Prefix(PrefixStrategy),
|
||||
Suffix(SuffixStrategy),
|
||||
RequiredExtension(RequiredExtensionStrategy),
|
||||
Regex(RegexSetStrategy),
|
||||
}
|
||||
|
||||
impl GlobSetMatchStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
use self::GlobSetMatchStrategy::*;
|
||||
match *self {
|
||||
Literal(ref s) => s.is_match(candidate),
|
||||
BasenameLiteral(ref s) => s.is_match(candidate),
|
||||
Extension(ref s) => s.is_match(candidate),
|
||||
Prefix(ref s) => s.is_match(candidate),
|
||||
Suffix(ref s) => s.is_match(candidate),
|
||||
RequiredExtension(ref s) => s.is_match(candidate),
|
||||
Regex(ref s) => s.is_match(candidate),
|
||||
}
|
||||
}
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
use self::GlobSetMatchStrategy::*;
|
||||
match *self {
|
||||
Literal(ref s) => s.matches_into(candidate, matches),
|
||||
BasenameLiteral(ref s) => s.matches_into(candidate, matches),
|
||||
Extension(ref s) => s.matches_into(candidate, matches),
|
||||
Prefix(ref s) => s.matches_into(candidate, matches),
|
||||
Suffix(ref s) => s.matches_into(candidate, matches),
|
||||
RequiredExtension(ref s) => s.matches_into(candidate, matches),
|
||||
Regex(ref s) => s.matches_into(candidate, matches),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct LiteralStrategy(BTreeMap<Vec<u8>, Vec<usize>>);
|
||||
|
||||
impl LiteralStrategy {
|
||||
fn new() -> LiteralStrategy {
|
||||
LiteralStrategy(BTreeMap::new())
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, lit: String) {
|
||||
self.0.entry(lit.into_bytes()).or_insert(vec![]).push(global_index);
|
||||
}
|
||||
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
self.0.contains_key(candidate.path.as_bytes())
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
if let Some(hits) = self.0.get(candidate.path.as_bytes()) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct BasenameLiteralStrategy(BTreeMap<Vec<u8>, Vec<usize>>);
|
||||
|
||||
impl BasenameLiteralStrategy {
|
||||
fn new() -> BasenameLiteralStrategy {
|
||||
BasenameLiteralStrategy(BTreeMap::new())
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, lit: String) {
|
||||
self.0.entry(lit.into_bytes()).or_insert(vec![]).push(global_index);
|
||||
}
|
||||
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
if candidate.basename.is_empty() {
|
||||
return false;
|
||||
}
|
||||
self.0.contains_key(candidate.basename.as_bytes())
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
if candidate.basename.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(hits) = self.0.get(candidate.basename.as_bytes()) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct ExtensionStrategy(HashMap<Vec<u8>, Vec<usize>, Fnv>);
|
||||
|
||||
impl ExtensionStrategy {
|
||||
fn new() -> ExtensionStrategy {
|
||||
ExtensionStrategy(HashMap::with_hasher(Fnv::default()))
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, ext: String) {
|
||||
self.0.entry(ext.into_bytes()).or_insert(vec![]).push(global_index);
|
||||
}
|
||||
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
if candidate.ext.is_empty() {
|
||||
return false;
|
||||
}
|
||||
self.0.contains_key(candidate.ext.as_bytes())
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
if candidate.ext.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(hits) = self.0.get(candidate.ext.as_bytes()) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct PrefixStrategy {
|
||||
matcher: AhoCorasick,
|
||||
map: Vec<usize>,
|
||||
longest: usize,
|
||||
}
|
||||
|
||||
impl PrefixStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
let path = candidate.path_prefix(self.longest);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.start() == 0 {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
let path = candidate.path_prefix(self.longest);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.start() == 0 {
|
||||
matches.push(self.map[m.pattern()]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct SuffixStrategy {
|
||||
matcher: AhoCorasick,
|
||||
map: Vec<usize>,
|
||||
longest: usize,
|
||||
}
|
||||
|
||||
impl SuffixStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
let path = candidate.path_suffix(self.longest);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.end() == path.len() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
let path = candidate.path_suffix(self.longest);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.end() == path.len() {
|
||||
matches.push(self.map[m.pattern()]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct RequiredExtensionStrategy(HashMap<Vec<u8>, Vec<(usize, Regex)>, Fnv>);
|
||||
|
||||
impl RequiredExtensionStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
if candidate.ext.is_empty() {
|
||||
return false;
|
||||
}
|
||||
match self.0.get(candidate.ext.as_bytes()) {
|
||||
None => false,
|
||||
Some(regexes) => {
|
||||
for &(_, ref re) in regexes {
|
||||
if re.is_match(candidate.path.as_bytes()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
if candidate.ext.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(regexes) = self.0.get(candidate.ext.as_bytes()) {
|
||||
for &(global_index, ref re) in regexes {
|
||||
if re.is_match(candidate.path.as_bytes()) {
|
||||
matches.push(global_index);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct RegexSetStrategy {
|
||||
matcher: RegexSet,
|
||||
map: Vec<usize>,
|
||||
}
|
||||
|
||||
impl RegexSetStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
self.matcher.is_match(candidate.path.as_bytes())
|
||||
}
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
for i in self.matcher.matches(candidate.path.as_bytes()) {
|
||||
matches.push(self.map[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct MultiStrategyBuilder {
|
||||
literals: Vec<String>,
|
||||
map: Vec<usize>,
|
||||
longest: usize,
|
||||
}
|
||||
|
||||
impl MultiStrategyBuilder {
|
||||
fn new() -> MultiStrategyBuilder {
|
||||
MultiStrategyBuilder { literals: vec![], map: vec![], longest: 0 }
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, literal: String) {
|
||||
if literal.len() > self.longest {
|
||||
self.longest = literal.len();
|
||||
}
|
||||
self.map.push(global_index);
|
||||
self.literals.push(literal);
|
||||
}
|
||||
|
||||
fn prefix(self) -> PrefixStrategy {
|
||||
PrefixStrategy {
|
||||
matcher: AhoCorasick::new_auto_configured(&self.literals),
|
||||
map: self.map,
|
||||
longest: self.longest,
|
||||
}
|
||||
}
|
||||
|
||||
fn suffix(self) -> SuffixStrategy {
|
||||
SuffixStrategy {
|
||||
matcher: AhoCorasick::new_auto_configured(&self.literals),
|
||||
map: self.map,
|
||||
longest: self.longest,
|
||||
}
|
||||
}
|
||||
|
||||
fn regex_set(self) -> Result<RegexSetStrategy, Error> {
|
||||
Ok(RegexSetStrategy {
|
||||
matcher: new_regex_set(self.literals)?,
|
||||
map: self.map,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct RequiredExtensionStrategyBuilder(
|
||||
HashMap<Vec<u8>, Vec<(usize, String)>>,
|
||||
);
|
||||
|
||||
impl RequiredExtensionStrategyBuilder {
|
||||
fn new() -> RequiredExtensionStrategyBuilder {
|
||||
RequiredExtensionStrategyBuilder(HashMap::new())
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, ext: String, regex: String) {
|
||||
self.0
|
||||
.entry(ext.into_bytes())
|
||||
.or_insert(vec![])
|
||||
.push((global_index, regex));
|
||||
}
|
||||
|
||||
fn build(self) -> Result<RequiredExtensionStrategy, Error> {
|
||||
let mut exts = HashMap::with_hasher(Fnv::default());
|
||||
for (ext, regexes) in self.0.into_iter() {
|
||||
exts.insert(ext.clone(), vec![]);
|
||||
for (global_index, regex) in regexes {
|
||||
let compiled = new_regex(®ex)?;
|
||||
exts.get_mut(&ext).unwrap().push((global_index, compiled));
|
||||
}
|
||||
}
|
||||
Ok(RequiredExtensionStrategy(exts))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::GlobSetBuilder;
|
||||
use glob::Glob;
|
||||
|
||||
#[test]
|
||||
fn set_works() {
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
builder.add(Glob::new("src/**/*.rs").unwrap());
|
||||
builder.add(Glob::new("*.c").unwrap());
|
||||
builder.add(Glob::new("src/lib.rs").unwrap());
|
||||
let set = builder.build().unwrap();
|
||||
|
||||
assert!(set.is_match("foo.c"));
|
||||
assert!(set.is_match("src/foo.c"));
|
||||
assert!(!set.is_match("foo.rs"));
|
||||
assert!(!set.is_match("tests/foo.rs"));
|
||||
assert!(set.is_match("src/foo.rs"));
|
||||
assert!(set.is_match("src/grep/src/main.rs"));
|
||||
|
||||
let matches = set.matches("src/lib.rs");
|
||||
assert_eq!(2, matches.len());
|
||||
assert_eq!(0, matches[0]);
|
||||
assert_eq!(2, matches[1]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_set_works() {
|
||||
let set = GlobSetBuilder::new().build().unwrap();
|
||||
assert!(!set.is_match(""));
|
||||
assert!(!set.is_match("a"));
|
||||
}
|
||||
}
|
crates/globset/src/pathutil.rs (new file)
@@ -0,0 +1,129 @@
|
||||
use std::borrow::Cow;
|
||||
|
||||
use bstr::{ByteSlice, ByteVec};
|
||||
|
||||
/// The final component of the path, if it is a normal file.
|
||||
///
|
||||
/// If the path terminates in `.` or `..`, or consists solely of a root or prefix,
|
||||
/// file_name will return None.
|
||||
pub fn file_name<'a>(path: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
|
||||
if path.is_empty() {
|
||||
return None;
|
||||
} else if path.last_byte() == Some(b'.') {
|
||||
return None;
|
||||
}
|
||||
let last_slash = path.rfind_byte(b'/').map(|i| i + 1).unwrap_or(0);
|
||||
Some(match *path {
|
||||
Cow::Borrowed(path) => Cow::Borrowed(&path[last_slash..]),
|
||||
Cow::Owned(ref path) => {
|
||||
let mut path = path.clone();
|
||||
path.drain_bytes(..last_slash);
|
||||
Cow::Owned(path)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Return a file extension given a path's file name.
|
||||
///
|
||||
/// Note that this does NOT match the semantics of std::path::Path::extension.
|
||||
/// Namely, the extension includes the `.` and matching is otherwise more
|
||||
/// liberal. Specifically, the extension is:
|
||||
///
|
||||
/// * None, if the file name given is empty;
|
||||
/// * None, if there is no embedded `.`;
|
||||
/// * Otherwise, the portion of the file name starting with the final `.`.
|
||||
///
|
||||
/// e.g., A file name of `.rs` has an extension `.rs`.
|
||||
///
|
||||
/// N.B. This is done to make certain glob match optimizations easier. Namely,
|
||||
/// a pattern like `*.rs` is obviously trying to match files with a `rs`
|
||||
/// extension, but it also matches files like `.rs`, which doesn't have an
|
||||
/// extension according to std::path::Path::extension.
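///
/// For example, a brief sketch of the difference:
///
/// ```ignore
/// // This crate's notion of an extension keeps the `.` and applies to
/// // dotfiles, unlike std::path::Path::extension.
/// assert_eq!(file_name_ext(&Cow::Borrowed(B(".rs"))), Some(Cow::Borrowed(B(".rs"))));
/// assert_eq!(std::path::Path::new(".rs").extension(), None);
/// ```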
|
||||
pub fn file_name_ext<'a>(name: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
|
||||
if name.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let last_dot_at = match name.rfind_byte(b'.') {
|
||||
None => return None,
|
||||
Some(i) => i,
|
||||
};
|
||||
Some(match *name {
|
||||
Cow::Borrowed(name) => Cow::Borrowed(&name[last_dot_at..]),
|
||||
Cow::Owned(ref name) => {
|
||||
let mut name = name.clone();
|
||||
name.drain_bytes(..last_dot_at);
|
||||
Cow::Owned(name)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Normalizes a path to use `/` as a separator everywhere, even on platforms
|
||||
/// that recognize other characters as separators.
|
||||
#[cfg(unix)]
|
||||
pub fn normalize_path(path: Cow<[u8]>) -> Cow<[u8]> {
|
||||
// UNIX only uses /, so we're good.
|
||||
path
|
||||
}
|
||||
|
||||
/// Normalizes a path to use `/` as a separator everywhere, even on platforms
|
||||
/// that recognize other characters as separators.
|
||||
#[cfg(not(unix))]
|
||||
pub fn normalize_path(mut path: Cow<[u8]>) -> Cow<[u8]> {
|
||||
use std::path::is_separator;
|
||||
|
||||
for i in 0..path.len() {
|
||||
if path[i] == b'/' || !is_separator(path[i] as char) {
|
||||
continue;
|
||||
}
|
||||
path.to_mut()[i] = b'/';
|
||||
}
|
||||
path
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::borrow::Cow;
|
||||
|
||||
use bstr::{ByteVec, B};
|
||||
|
||||
use super::{file_name_ext, normalize_path};
|
||||
|
||||
macro_rules! ext {
|
||||
($name:ident, $file_name:expr, $ext:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let bs = Vec::from($file_name);
|
||||
let got = file_name_ext(&Cow::Owned(bs));
|
||||
assert_eq!($ext.map(|s| Cow::Borrowed(B(s))), got);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
ext!(ext1, "foo.rs", Some(".rs"));
|
||||
ext!(ext2, ".rs", Some(".rs"));
|
||||
ext!(ext3, "..rs", Some(".rs"));
|
||||
ext!(ext4, "", None::<&str>);
|
||||
ext!(ext5, "foo", None::<&str>);
|
||||
|
||||
macro_rules! normalize {
|
||||
($name:ident, $path:expr, $expected:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let bs = Vec::from_slice($path);
|
||||
let got = normalize_path(Cow::Owned(bs));
|
||||
assert_eq!($expected.to_vec(), got.into_owned());
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
normalize!(normal1, b"foo", b"foo");
|
||||
normalize!(normal2, b"foo/bar", b"foo/bar");
|
||||
#[cfg(unix)]
|
||||
normalize!(normal3, b"foo\\bar", b"foo\\bar");
|
||||
#[cfg(not(unix))]
|
||||
normalize!(normal3, b"foo\\bar", b"foo/bar");
|
||||
#[cfg(unix)]
|
||||
normalize!(normal4, b"foo\\bar/baz", b"foo\\bar/baz");
|
||||
#[cfg(not(unix))]
|
||||
normalize!(normal4, b"foo\\bar/baz", b"foo/bar/baz");
|
||||
}
|