mirror of https://github.com/quickwit-oss/tantivy.git
synced 2026-01-03 15:52:55 +00:00

Compare commits: kkoziara-p... → remove-tru... (5 commits)
Commits:

- 4fc8712f1a
- 6e4fdfd4bf
- 0519056bd8
- 7305ad575e
- 79f64ac2f4
.github/FUNDING.yml (vendored, new file, +12)

```diff
@@ -0,0 +1,12 @@
+# These are supported funding model platforms
+
+github: fulmicoton
+patreon: # Replace with a single Patreon username
+open_collective: # Replace with a single Open Collective username
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
```
Cargo.toml

```diff
@@ -50,10 +50,10 @@ owned-read = "0.4"
 failure = "0.1"
 htmlescape = "0.3.1"
 fail = "0.3"
-scoped-pool = "1.0"
 murmurhash32 = "0.2"
 chrono = "0.4"
-smallvec = "0.6"
+smallvec = "1.0"
+rayon = "1"
 
 [target.'cfg(windows)'.dependencies]
 winapi = "0.3"
@@ -64,6 +64,10 @@ maplit = "1"
 matches = "0.1.8"
 time = "0.1.42"
 
+[dev-dependencies.fail]
+version = "0.3"
+features = ["failpoints"]
+
 [profile.release]
 opt-level = 3
 debug = false
@@ -87,10 +91,6 @@ members = ["query-grammar"]
 [badges]
 travis-ci = { repository = "tantivy-search/tantivy" }
 
-[dev-dependencies.fail]
-version = "0.3"
-features = ["failpoints"]
-
 # Following the "fail" crate best practises, we isolate
 # tests that define specific behavior in fail check points
 # in a different binary.
```
src/core/executor.rs

```diff
@@ -1,6 +1,6 @@
 use crate::Result;
 use crossbeam::channel;
-use scoped_pool::{Pool, ThreadConfig};
+use rayon::{ThreadPool, ThreadPoolBuilder};
 
 /// Search executor whether search request are single thread or multithread.
 ///
@@ -11,7 +11,7 @@ use scoped_pool::{Pool, ThreadConfig};
 /// used by the client. Second, we may stop using rayon in the future.
 pub enum Executor {
     SingleThread,
-    ThreadPool(Pool),
+    ThreadPool(ThreadPool),
 }
 
 impl Executor {
@@ -21,10 +21,12 @@ impl Executor {
     }
 
     // Creates an Executor that dispatches the tasks in a thread pool.
-    pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Executor {
-        let thread_config = ThreadConfig::new().prefix(prefix);
-        let pool = Pool::with_thread_config(num_threads, thread_config);
-        Executor::ThreadPool(pool)
+    pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Result<Executor> {
+        let pool = ThreadPoolBuilder::new()
+            .num_threads(num_threads)
+            .thread_name(move |num| format!("{}{}", prefix, num))
+            .build()?;
+        Ok(Executor::ThreadPool(pool))
    }
 
     // Perform a map in the thread pool.
```
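For context, a minimal standalone sketch of the rayon `ThreadPoolBuilder` API the new constructor relies on; the pool size, thread-name prefix, and the `install` call here are illustrative, not tantivy's actual wiring:

```rust
use rayon::ThreadPoolBuilder;

fn main() -> Result<(), rayon::ThreadPoolBuildError> {
    // Build a named pool the same way the new `Executor::multi_thread` does.
    // `build()` returns an Err instead of panicking, which is why the
    // constructor now returns a Result.
    let pool = ThreadPoolBuilder::new()
        .num_threads(4)
        .thread_name(|num| format!("demo-thread-{}", num))
        .build()?;
    // `install` runs a closure inside the pool and returns its result.
    let sum: i32 = pool.install(|| (1..=10).sum());
    assert_eq!(sum, 55);
    Ok(())
}
```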
```diff
@@ -48,9 +50,9 @@ impl Executor {
                 let num_fruits = args_with_indices.len();
                 let fruit_receiver = {
                     let (fruit_sender, fruit_receiver) = channel::unbounded();
-                    pool.scoped(|scope| {
+                    pool.scope(|scope| {
                         for arg_with_idx in args_with_indices {
-                            scope.execute(|| {
+                            scope.spawn(|_| {
                                 let (idx, arg) = arg_with_idx;
                                 let fruit = f(arg);
                                 if let Err(err) = fruit_sender.send((idx, fruit)) {
```
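This hunk swaps scoped-pool's `scoped`/`execute` for rayon's `scope`/`spawn` while keeping the crossbeam channel that carries index-tagged results back. A self-contained sketch of that pattern, assuming nothing about tantivy's surrounding `map` implementation:

```rust
use crossbeam::channel;
use rayon::ThreadPoolBuilder;

fn main() {
    let pool = ThreadPoolBuilder::new().num_threads(3).build().unwrap();
    let args = vec![10, 20, 30, 40];
    let num_fruits = args.len();
    let (sender, receiver) = channel::unbounded();
    // `scope` blocks until every `spawn`ed task has finished, so the tasks
    // may freely borrow from the enclosing stack frame.
    pool.scope(|scope| {
        for (idx, arg) in args.into_iter().enumerate() {
            let sender = sender.clone();
            scope.spawn(move |_| {
                // Tag each result with its input index so order can be
                // restored even though tasks finish in arbitrary order.
                sender.send((idx, arg * 2)).expect("channel closed");
            });
        }
    });
    let mut fruits = vec![0; num_fruits];
    for (idx, fruit) in receiver.try_iter() {
        fruits[idx] = fruit;
    }
    assert_eq!(fruits, vec![20, 40, 60, 80]);
}
```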
```diff
@@ -103,6 +105,7 @@ mod tests {
     #[should_panic] //< unfortunately the panic message is not propagated
     fn test_panic_propagates_multi_thread() {
         let _result: Vec<usize> = Executor::multi_thread(1, "search-test")
+            .unwrap()
             .map(
                 |_| {
                     panic!("panic should propagate");
@@ -126,6 +129,7 @@ mod tests {
     #[test]
     fn test_map_multithread() {
         let result: Vec<usize> = Executor::multi_thread(3, "search-test")
+            .unwrap()
             .map(|i| Ok(i * 2), 0..10)
             .unwrap();
         assert_eq!(result.len(), 10);
```
src/core/index.rs

```diff
@@ -73,15 +73,16 @@ impl Index {
 
     /// Replace the default single thread search executor pool
     /// by a thread pool with a given number of threads.
-    pub fn set_multithread_executor(&mut self, num_threads: usize) {
-        self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-"));
+    pub fn set_multithread_executor(&mut self, num_threads: usize) -> Result<()> {
+        self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-")?);
+        Ok(())
     }
 
     /// Replace the default single thread search executor pool
     /// by a thread pool with a given number of threads.
-    pub fn set_default_multithread_executor(&mut self) {
+    pub fn set_default_multithread_executor(&mut self) -> Result<()> {
         let default_num_threads = num_cpus::get();
-        self.set_multithread_executor(default_num_threads);
+        self.set_multithread_executor(default_num_threads)
     }
 
     /// Creates a new index using the `RAMDirectory`.
```
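With `Executor::multi_thread` now fallible, both `Index` setters propagate the error to the caller instead of panicking inside pool construction. A usage sketch; the schema and field are placeholders:

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();
    let mut index = Index::create_in_ram(schema);
    // Fallible after this change: pool construction errors surface here
    // via `?` rather than aborting the process.
    index.set_multithread_executor(4)?;
    Ok(())
}
```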
src/error.rs

```diff
@@ -170,3 +170,9 @@ impl From<serde_json::Error> for TantivyError {
         TantivyError::IOError(io_err.into())
     }
 }
+
+impl From<rayon::ThreadPoolBuildError> for TantivyError {
+    fn from(error: rayon::ThreadPoolBuildError) -> TantivyError {
+        TantivyError::SystemError(error.to_string())
+    }
+}
```
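This `From` impl is what lets the `?` in `Executor::multi_thread` convert rayon's build error into a `TantivyError`. A minimal model of the mechanism, with a local stand-in for `TantivyError`:

```rust
use rayon::{ThreadPool, ThreadPoolBuildError, ThreadPoolBuilder};

// A stand-in for TantivyError::SystemError, just to show the mechanism.
#[derive(Debug)]
enum MyError {
    System(String),
}

impl From<ThreadPoolBuildError> for MyError {
    fn from(error: ThreadPoolBuildError) -> MyError {
        MyError::System(error.to_string())
    }
}

// Because of the `From` impl, `?` converts the rayon error automatically,
// which is exactly what `Executor::multi_thread` relies on.
fn build_pool() -> Result<ThreadPool, MyError> {
    let pool = ThreadPoolBuilder::new().num_threads(2).build()?;
    Ok(pool)
}

fn main() {
    assert!(build_pool().is_ok());
}
```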
src/indexer/index_writer.rs

```diff
@@ -227,7 +227,8 @@ fn index_documents(
         delete_cursor,
         delete_bitset_opt,
     );
-    Ok(segment_updater.add_segment(segment_entry))
+    segment_updater.add_segment(segment_entry);
+    Ok(true)
 }
 
 fn apply_deletes(
```
src/indexer/segment_updater.rs

```diff
@@ -199,14 +199,12 @@ impl SegmentUpdater {
         self.0.pool.spawn_fn(move || Ok(f(me_clone)))
     }
 
-    pub fn add_segment(&self, segment_entry: SegmentEntry) -> bool {
+    pub fn add_segment(&self, segment_entry: SegmentEntry) {
         self.run_async(|segment_updater| {
             segment_updater.0.segment_manager.add_segment(segment_entry);
             segment_updater.consider_merge_options();
-            true
         })
         .forget();
-        true
     }
 
     /// Orders `SegmentManager` to remove all segments
```
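`add_segment` stops returning `bool` because the closure runs fire-and-forget: `run_async` schedules it on the updater's pool and `forget()` detaches the future, so the old constant `true` told callers nothing. A sketch of that pattern, assuming the pool is a futures-0.1 `futures_cpupool::CpuPool` (suggested by the `spawn_fn(..).forget()` calls, but not shown in this diff):

```rust
use futures_cpupool::CpuPool;

fn main() {
    let pool = CpuPool::new(1);
    // `spawn_fn` returns a `CpuFuture`; `forget()` detaches it so the task
    // keeps running in the background, but its result can never be
    // observed. That is why returning `bool` here was misleading.
    pool.spawn_fn(|| -> Result<(), ()> {
        // ... register the segment, consider merge options ...
        Ok(())
    })
    .forget();
}
```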
src/indexer/segment_writer.rs

```diff
@@ -170,6 +170,7 @@ impl SegmentWriter {
             if let Some(last_token) = tok_str.tokens.last() {
                 total_offset += last_token.offset_to;
             }
+
             token_streams
                 .push(Box::new(PreTokenizedStream::from(tok_str.clone())));
         }
```
src/tokenizer/tokenized_string.rs

```diff
@@ -43,32 +43,29 @@ impl PreTokenizedStream {
         tok_strings: &'a [&'a PreTokenizedString],
     ) -> Box<dyn TokenStream + 'a> {
         if tok_strings.len() == 1 {
-            return Box::new(PreTokenizedStream::from((*tok_strings[0]).clone()));
-        }
-        let mut offsets = vec![];
-        let mut total_offset = 0;
-        for &tok_string in tok_strings {
-            offsets.push(total_offset);
-            if let Some(last_token) = tok_string.tokens.last() {
-                total_offset += last_token.offset_to;
-            }
-        }
-        let token_streams: Vec<_> = tok_strings
-            .iter()
-            .map(|tok_string| PreTokenizedStream::from((*tok_string).clone()))
-            .collect();
-        Box::new(TokenStreamChain::new(offsets, token_streams))
+            Box::new(PreTokenizedStream::from((*tok_strings[0]).clone()))
+        } else {
+            let mut offsets = vec![];
+            let mut total_offset = 0;
+            for &tok_string in tok_strings {
+                offsets.push(total_offset);
+                if let Some(last_token) = tok_string.tokens.last() {
+                    total_offset += last_token.offset_to;
+                }
+            }
+            let token_streams: Vec<_> = tok_strings
+                .iter()
+                .map(|tok_string| PreTokenizedStream::from((*tok_string).clone()))
+                .collect();
+            Box::new(TokenStreamChain::new(offsets, token_streams))
+        }
     }
 }
 
 impl TokenStream for PreTokenizedStream {
     fn advance(&mut self) -> bool {
-        if self.current_token >= self.tokenized_string.tokens.len() as i64 - 1 {
-            // This was our last token.
-            return false;
-        }
         self.current_token += 1;
-        true
+        self.current_token < self.tokenized_string.tokens.len() as i64
     }
 
     fn token(&self) -> &Token {
```
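The rewritten `advance` folds the increment and the bounds check into a single expression. A minimal model of the cursor contract, assuming the stream starts at `current_token == -1` (which would explain the `i64` field):

```rust
// Stand-in for PreTokenizedStream's cursor state.
struct Cursor {
    current_token: i64,
    len: usize,
}

impl Cursor {
    // Advance to the next token; report whether one exists.
    fn advance(&mut self) -> bool {
        self.current_token += 1;
        self.current_token < self.len as i64
    }
}

fn main() {
    let mut cursor = Cursor { current_token: -1, len: 2 };
    assert!(cursor.advance()); // token 0
    assert!(cursor.advance()); // token 1
    assert!(!cursor.advance()); // past the end
}
```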
```diff
@@ -117,13 +114,13 @@ mod tests {
             ],
         };
 
-        let mut tok_stream = PreTokenizedStream::from(tok_text.clone());
+        let mut token_stream = PreTokenizedStream::from(tok_text.clone());
 
-        let mut i = 0;
-        while tok_stream.advance() {
-            assert!(*tok_stream.token() == tok_text.tokens[i]);
-            i += 1;
+        for expected_token in tok_text.tokens {
+            assert!(token_stream.advance());
+            assert_eq!(token_stream.token(), &expected_token);
         }
+        assert!(!token_stream.advance());
     }
 
     #[test]
@@ -182,6 +179,7 @@ mod tests {
                 position_length: 1,
             },
         ];
 
         for expected_token in expected_tokens {
+            assert!(token_stream.advance());
             assert_eq!(token_stream.token(), &expected_token);
```