Backmerging master
@@ -1,3 +1,13 @@
|
||||
Tantivy 0.5
|
||||
==========================
|
||||
- Faceting
|
||||
- RangeQuery
|
||||
- Configurable tokenization pipeline
|
||||
- Allowing super large indexes
|
||||
- 64-bit file addresses
|
||||
- Smarter encoding of the `TermInfo` objects
Tantivy 0.4.3
|
||||
==========================
@@ -20,7 +20,6 @@ impl CountCollector {
|
||||
}
|
||||
}
impl Collector for CountCollector {
|
||||
fn set_segment(&mut self, _: SegmentLocalId, _: &SegmentReader) -> Result<()> {
|
||||
Ok(())
|
||||
@@ -34,18 +33,16 @@ impl Collector for CountCollector {
|
||||
#[cfg(test)]
|
||||
mod tests {
use super::*;
|
||||
use test::Bencher;
|
||||
use collector::Collector;
|
||||
use collector::{Collector, CountCollector};
#[bench]
|
||||
fn build_collector(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
let mut count_collector = CountCollector::default();
|
||||
for doc in 0..1_000_000 {
|
||||
count_collector.collect(doc, 1f32);
|
||||
}
|
||||
count_collector.count()
|
||||
});
|
||||
#[test]
|
||||
fn test_count_collector() {
|
||||
let mut count_collector = CountCollector::default();
|
||||
assert_eq!(count_collector.count(), 0);
|
||||
count_collector.collect(0u32, 1f32);
|
||||
assert_eq!(count_collector.count(), 1);
|
||||
assert_eq!(count_collector.count(), 1);
|
||||
count_collector.collect(1u32, 1f32);
|
||||
assert_eq!(count_collector.count(), 2);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,11 +13,9 @@ use termdict::TermStreamerBuilder;
|
||||
use std::collections::BTreeSet;
|
||||
use termdict::TermMerger;
|
||||
use postings::SkipResult;
|
||||
use std::{u64, usize};
|
||||
use schema::FACET_SEP_BYTE;
|
||||
use std::{usize, u64};
|
||||
use std::iter::Peekable;
use DocId;
|
||||
use Result;
|
||||
use Score;
|
||||
@@ -50,7 +48,6 @@ impl<'a> Ord for Hit<'a> {
|
||||
}
|
||||
}
struct SegmentFacetCounter {
|
||||
pub facet_reader: FacetReader,
|
||||
pub facet_ords: Vec<u64>,
|
||||
@@ -61,16 +58,10 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
|
||||
if facet_bytes.is_empty() {
|
||||
0
|
||||
} else {
|
||||
facet_bytes
|
||||
.iter()
|
||||
.cloned()
|
||||
.filter(|b| *b == 0u8)
|
||||
.count() + 1
|
||||
facet_bytes.iter().cloned().filter(|b| *b == 0u8).count() + 1
|
||||
}
|
||||
}
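In other words, a facet's depth is the number of `FACET_SEP_BYTE` (0) separators in its encoded bytes, plus one, the root facet encoding to an empty byte string. A tiny stand-alone sketch of that counting rule (the encoded byte strings are assumptions derived from the separator logic, not literals taken from the crate):

    // Stand-alone illustration of the depth rule implemented by `facet_depth` above.
    fn depth_of(encoded: &[u8]) -> usize {
        if encoded.is_empty() {
            0
        } else {
            encoded.iter().filter(|&&b| b == 0u8).count() + 1
        }
    }

    fn main() {
        assert_eq!(depth_of(b""), 0);                  // the root facet "/"
        assert_eq!(depth_of(b"category"), 1);          // "/category"
        assert_eq!(depth_of(b"category\0fiction"), 2); // "/category/fiction"
    }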
/// Collector for faceting
|
||||
///
|
||||
/// The collector collects all facets. You need to configure it
|
||||
@@ -93,7 +84,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
|
||||
/// (e.g. `/category/fiction`, `/category/biography`, `/category/personal_development`).
|
||||
///
|
||||
/// Once collection is finished, you can harvest its results in the form
|
||||
/// of a `FacetCounts` object, and extract your facet counts from it.
|
||||
/// of a `FacetCounts` object, and extract your facet counts from it.
|
||||
///
|
||||
/// This implementation assumes you are working with a number of facets that
|
||||
/// is hundreds of times lower than your number of documents.
|
||||
@@ -204,7 +195,6 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
pub struct FacetCollector {
|
||||
facet_ords: Vec<u64>,
|
||||
field: Field,
|
||||
@@ -218,23 +208,24 @@ pub struct FacetCollector {
|
||||
// collapse facet_id -> facet_ord
|
||||
current_collapse_facet_ords: Vec<u64>,
|
||||
|
||||
collapse: BTreeSet<Vec<u8>>,
|
||||
facets: BTreeSet<Facet>,
|
||||
}
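Putting the doc comment above into practice: build the collector for a hierarchical facet field, register the parent facets whose children should be counted, run a search with it, then harvest a `FacetCounts`. The sketch below assumes a `searcher` and a facet `Field` called `facet_field` already exist, and that the harvesting method described above is named `harvest()`; treat those names as assumptions if your tantivy version differs.

    // Minimal workflow sketch; `searcher` and `facet_field` are assumed to be set up.
    let mut facet_collector = FacetCollector::for_field(facet_field);
    facet_collector.add_facet(Facet::from("/category"));
    searcher.search(&AllQuery, &mut facet_collector).unwrap();

    let counts: FacetCounts = facet_collector.harvest();
    // Counts for the direct children of "/category", e.g. ("/category/fiction", 42).
    for (facet, count) in counts.get("/category") {
        println!("{}: {}", facet, count);
    }
    // Or only the three most frequent children.
    let top3: Vec<(&Facet, u64)> = counts.top_k("/category", 3);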
fn skip<'a, I: Iterator<Item=&'a Vec<u8>>>(target: &[u8], collapse_it: &mut Peekable<I>) -> SkipResult {
|
||||
fn skip<'a, I: Iterator<Item = &'a Facet>>(
|
||||
target: &[u8],
|
||||
collapse_it: &mut Peekable<I>,
|
||||
) -> SkipResult {
|
||||
loop {
|
||||
match collapse_it.peek() {
|
||||
Some(facet_bytes) => {
|
||||
match facet_bytes[..].cmp(&target) {
|
||||
Ordering::Less => {}
|
||||
Ordering::Greater => {
|
||||
return SkipResult::OverStep;
|
||||
}
|
||||
Ordering::Equal => {
|
||||
return SkipResult::Reached;
|
||||
}
|
||||
Some(facet_bytes) => match facet_bytes.encoded_bytes().cmp(target) {
|
||||
Ordering::Less => {}
|
||||
Ordering::Greater => {
|
||||
return SkipResult::OverStep;
|
||||
}
|
||||
}
|
||||
Ordering::Equal => {
|
||||
return SkipResult::Reached;
|
||||
}
|
||||
},
|
||||
None => {
|
||||
return SkipResult::End;
|
||||
}
|
||||
@@ -244,71 +235,59 @@ fn skip<'a, I: Iterator<Item=&'a Vec<u8>>>(target: &[u8], collapse_it: &mut Peek
|
||||
}
|
||||
|
||||
impl FacetCollector {
|
||||
|
||||
/// Create a facet collector to collect the facets
|
||||
/// from a specific facet `Field`.
|
||||
///
|
||||
/// This function does not check whether the field
|
||||
/// is of the proper type.
|
||||
pub fn for_field(field: Field) -> FacetCollector {
|
||||
let mut facet_collector = FacetCollector {
|
||||
FacetCollector {
|
||||
facet_ords: Vec::with_capacity(255),
|
||||
field: field,
|
||||
ff_reader: None,
|
||||
segment_counters: Vec::new(),
|
||||
collapse: BTreeSet::new(),
|
||||
field,
|
||||
ff_reader: None,
|
||||
facets: BTreeSet::new(),
|
||||
|
||||
current_segment_collapse_mapping: Vec::new(),
|
||||
current_collapse_facet_ords: Vec::new(),
|
||||
current_segment_counts: Vec::new(),
|
||||
};
|
||||
facet_collector.add_facet(Facet::from("/"));
|
||||
facet_collector
|
||||
}
|
||||
|
||||
/// Adds a facet that we want to collect.
|
||||
///
|
||||
/// For all of the facets that are one level below the facet
|
||||
/// given in argument, the collector will collect the count of unique
|
||||
/// documents containing the facet.
|
||||
///
|
||||
/// Calling `add_facet()` on two facets that have an ancestor-
/// descendant relationship is not allowed.
|
||||
pub fn add_facet<T>(&mut self, facet_from: T)
|
||||
where Facet: From<T> {
|
||||
let facet = Facet::from(facet_from);
|
||||
let facet_bytes: &[u8] = facet.encoded_bytes();
|
||||
self.collapse.remove(&facet_bytes[..0]);
|
||||
for pos in facet_bytes.iter()
|
||||
.cloned()
|
||||
.position(|b| b == FACET_SEP_BYTE) {
|
||||
self.collapse.remove(&facet_bytes[..pos]);
|
||||
}
|
||||
self.collapse.insert(facet_bytes.to_owned());
|
||||
}
|
||||
|
||||
fn finalize_segment(&mut self) {
|
||||
if self.ff_reader.is_some() {
|
||||
self.segment_counters.push(
|
||||
SegmentFacetCounter {
|
||||
facet_reader: self.ff_reader.take().unwrap().into_inner(),
|
||||
facet_ords: mem::replace(&mut self.current_collapse_facet_ords, Vec::new()),
|
||||
facet_counts: mem::replace(&mut self.current_segment_counts, Vec::new()),
|
||||
}
|
||||
/// Adds a facet that we want to record counts
|
||||
///
|
||||
/// Adding facet `Facet::from("/country")` for instance,
|
||||
/// will record the counts of all of the direct children of the facet country
|
||||
/// (e.g. `/country/FR`, `/country/UK`).
|
||||
///
|
||||
/// Adding two facets where one is the prefix of the other is forbidden.
/// If you need the correct number of unique documents for two such facets,
/// just add them to separate `FacetCollector`s.
|
||||
pub fn add_facet<T>(&mut self, facet_from: T)
|
||||
where
|
||||
Facet: From<T>,
|
||||
{
|
||||
let facet = Facet::from(facet_from);
|
||||
for old_facet in &self.facets {
|
||||
assert!(
|
||||
!old_facet.is_prefix_of(&facet),
|
||||
"Tried to add a facet which is a descendant of an already added facet."
|
||||
);
|
||||
assert!(
|
||||
!facet.is_prefix_of(old_facet),
|
||||
"Tried to add a facet which is an ancestor of an already added facet."
|
||||
);
|
||||
}
|
||||
self.facets.insert(facet);
|
||||
}
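Concretely, the facets registered on a single collector must be pairwise unrelated, which is exactly what the two assertions above enforce (and what `test_misused_facet_collector` further down exercises). The facet paths here are illustrative:

    // Fine: "/country" and "/lang" are unrelated.
    facet_collector.add_facet(Facet::from("/country"));
    facet_collector.add_facet(Facet::from("/lang"));
    // Panics: "/country" is an ancestor of "/country/europe".
    // facet_collector.add_facet(Facet::from("/country/europe"));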
fn set_collapse_mapping(&mut self, facet_reader: &FacetReader) {
|
||||
self.current_segment_collapse_mapping.clear();
|
||||
self.current_collapse_facet_ords.clear();
|
||||
self.current_segment_counts.clear();
|
||||
let mut collapse_facet_it = self.collapse.iter().peekable();
|
||||
let mut collapse_facet_it = self.facets.iter().peekable();
|
||||
self.current_collapse_facet_ords.push(0);
|
||||
let mut facet_streamer = facet_reader
|
||||
.facet_dict()
|
||||
.range()
|
||||
.into_stream();
|
||||
let mut facet_streamer = facet_reader.facet_dict().range().into_stream();
|
||||
if !facet_streamer.advance() {
|
||||
return;
|
||||
}
|
||||
@@ -326,9 +305,11 @@ impl FacetCollector {
|
||||
let depth = facet_depth(facet_streamer.key());
|
||||
if depth <= collapse_depth {
|
||||
continue 'outer;
|
||||
} else if depth == collapse_depth + 1 {
|
||||
}
|
||||
if depth == collapse_depth + 1 {
|
||||
collapsed_id = self.current_collapse_facet_ords.len();
|
||||
self.current_collapse_facet_ords.push(facet_streamer.term_ord());
|
||||
self.current_collapse_facet_ords
|
||||
.push(facet_streamer.term_ord());
|
||||
self.current_segment_collapse_mapping.push(collapsed_id);
|
||||
} else {
|
||||
self.current_segment_collapse_mapping.push(collapsed_id);
|
||||
@@ -346,6 +327,16 @@ impl FacetCollector {
|
||||
}
|
||||
}
|
||||
|
||||
fn finalize_segment(&mut self) {
|
||||
if self.ff_reader.is_some() {
|
||||
self.segment_counters.push(SegmentFacetCounter {
|
||||
facet_reader: self.ff_reader.take().unwrap().into_inner(),
|
||||
facet_ords: mem::replace(&mut self.current_collapse_facet_ords, Vec::new()),
|
||||
facet_counts: mem::replace(&mut self.current_segment_counts, Vec::new()),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the results of the collection.
|
||||
///
|
||||
/// This method does not just return the counters,
|
||||
@@ -362,14 +353,9 @@ impl FacetCollector {
|
||||
.map(|segment_counter| &segment_counter.facet_counts[..])
|
||||
.collect();
|
||||
|
||||
|
||||
let facet_streams = self.segment_counters
|
||||
.iter()
|
||||
.map(|seg_counts| seg_counts
|
||||
.facet_reader
|
||||
.facet_dict()
|
||||
.range()
|
||||
.into_stream())
|
||||
.map(|seg_counts| seg_counts.facet_reader.facet_dict().range().into_stream())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut facet_merger = TermMerger::new(facet_streams);
|
||||
@@ -399,51 +385,43 @@ impl FacetCollector {
|
||||
facet_counts.insert(Facet::from_encoded(bytes), count);
|
||||
}
|
||||
}
|
||||
FacetCounts {
|
||||
facet_counts: facet_counts
|
||||
}
|
||||
FacetCounts { facet_counts }
|
||||
}
|
||||
}
impl Collector for FacetCollector {
|
||||
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
|
||||
self.finalize_segment();
|
||||
let facet_reader = reader.facet_reader(self.field)?;
|
||||
self.set_collapse_mapping(&facet_reader);
|
||||
self.current_segment_counts.resize(self.current_collapse_facet_ords.len(), 0);
|
||||
self.current_segment_counts
|
||||
.resize(self.current_collapse_facet_ords.len(), 0);
|
||||
self.ff_reader = Some(UnsafeCell::new(facet_reader));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn collect(&mut self, doc: DocId, _: Score) {
|
||||
let facet_reader: &mut FacetReader =
|
||||
unsafe {
|
||||
&mut *self.ff_reader
|
||||
.as_ref()
|
||||
.expect("collect() was called before set_segment. This should never happen.")
|
||||
.get()
|
||||
};
|
||||
let facet_reader: &mut FacetReader = unsafe {
|
||||
&mut *self.ff_reader
|
||||
.as_ref()
|
||||
.expect("collect() was called before set_segment. This should never happen.")
|
||||
.get()
|
||||
};
|
||||
facet_reader.facet_ords(doc, &mut self.facet_ords);
|
||||
let mut previous_collapsed_ord: usize = usize::MAX;
|
||||
for &facet_ord in &self.facet_ords {
|
||||
let collapsed_ord = self.current_segment_collapse_mapping[facet_ord as usize];
|
||||
self.current_segment_counts[collapsed_ord] +=
|
||||
if collapsed_ord == previous_collapsed_ord {
|
||||
0
|
||||
} else {
|
||||
1
|
||||
};
|
||||
self.current_segment_counts[collapsed_ord] += if collapsed_ord == previous_collapsed_ord
|
||||
{
|
||||
0
|
||||
} else {
|
||||
1
|
||||
};
|
||||
previous_collapsed_ord = collapsed_ord;
|
||||
}
|
||||
}
|
||||
}
/// Intermediary result of the `FacetCollector` that stores
|
||||
/// the facet counts for all the segments.
|
||||
pub struct FacetCounts {
|
||||
@@ -451,20 +429,21 @@ pub struct FacetCounts {
|
||||
}
|
||||
|
||||
impl FacetCounts {
|
||||
|
||||
pub fn get<'a, T>(&'a self, facet_from: T) -> impl Iterator<Item=(&'a Facet, u64)>
|
||||
where Facet: From<T> {
|
||||
#[allow(needless_lifetimes)] //< compiler fails if we remove the lifetime
|
||||
pub fn get<'a, T>(&'a self, facet_from: T) -> impl Iterator<Item = (&'a Facet, u64)>
|
||||
where
|
||||
Facet: From<T>,
|
||||
{
|
||||
let facet = Facet::from(facet_from);
|
||||
let left_bound = Bound::Excluded(facet.clone());
|
||||
let right_bound =
|
||||
if facet.is_root() {
|
||||
Bound::Unbounded
|
||||
} else {
|
||||
let mut facet_after_bytes = facet.encoded_bytes().to_owned();
|
||||
facet_after_bytes.push(1u8);
|
||||
let facet_after = Facet::from_encoded(facet_after_bytes);
|
||||
Bound::Excluded(facet_after)
|
||||
};
|
||||
let right_bound = if facet.is_root() {
|
||||
Bound::Unbounded
|
||||
} else {
|
||||
let mut facet_after_bytes = facet.encoded_bytes().to_owned();
|
||||
facet_after_bytes.push(1u8);
|
||||
let facet_after = Facet::from_encoded(facet_after_bytes);
|
||||
Bound::Excluded(facet_after)
|
||||
};
|
||||
|
||||
self.facet_counts
|
||||
.range((left_bound, right_bound))
|
||||
@@ -472,29 +451,22 @@ impl FacetCounts {
|
||||
}
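The bounds above work because of the facet encoding: path segments are separated by the 0 byte, so every descendant of a facet starts with the parent's bytes followed by 0, and appending a 1 byte to the parent's encoding yields a key that sorts after all of its descendants. A small byte-level sketch (the encodings are illustrative assumptions):

    // Parent "/category" -> b"category"; descendants start with b"category\x00".
    let parent = &b"category"[..];
    let child = &b"category\x00fiction"[..];
    let upper = &b"category\x01"[..]; // the `facet_after` key built above
    assert!(parent < child); // the parent itself is excluded by the left bound
    assert!(child < upper);  // every descendant sorts below the right bound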
pub fn top_k<T>(&self, facet: T, k: usize) -> Vec<(&Facet, u64)>
|
||||
where Facet: From<T> {
|
||||
|
||||
where
|
||||
Facet: From<T>,
|
||||
{
|
||||
let mut heap = BinaryHeap::with_capacity(k);
|
||||
let mut it = self.get(facet);
|
||||
|
||||
for (ref facet, count) in (&mut it).take(k) {
|
||||
heap.push(Hit {
|
||||
count: count,
|
||||
facet: facet
|
||||
});
|
||||
for (facet, count) in (&mut it).take(k) {
|
||||
heap.push(Hit { count, facet });
|
||||
}
|
||||
|
||||
let mut lowest_count: u64 = heap.peek()
|
||||
.map(|hit| hit.count)
|
||||
.unwrap_or(u64::MIN);
|
||||
let mut lowest_count: u64 = heap.peek().map(|hit| hit.count).unwrap_or(u64::MIN);
|
||||
for (facet, count) in it {
|
||||
if count > lowest_count {
|
||||
lowest_count = count;
|
||||
if let Some(mut head) = heap.peek_mut() {
|
||||
*head = Hit {
|
||||
count: count,
|
||||
facet: facet
|
||||
};
|
||||
*head = Hit { count, facet };
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -503,19 +475,17 @@ impl FacetCounts {
|
||||
.map(|hit| (hit.facet, hit.count))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
}
#[cfg(test)]
|
||||
mod tests {
|
||||
use test::Bencher;
|
||||
use core::Index;
|
||||
use schema::{SchemaBuilder, Document, Facet};
|
||||
use schema::{Document, Facet, SchemaBuilder};
|
||||
use query::AllQuery;
|
||||
use super::{FacetCollector, FacetCounts};
|
||||
use std::iter;
|
||||
use schema::Field;
|
||||
use rand::{thread_rng, Rng};
|
||||
|
||||
#[test]
|
||||
@@ -556,19 +526,36 @@ mod tests {
|
||||
.get("/top1")
|
||||
.map(|(facet, count)| (facet.to_string(), count))
|
||||
.collect();
|
||||
assert_eq!(facets, [
|
||||
("/top1/mid0", 50),
|
||||
("/top1/mid1", 50),
|
||||
("/top1/mid2", 50),
|
||||
("/top1/mid3", 50),
|
||||
].iter()
|
||||
.map(|&(facet_str, count)| {
|
||||
(String::from(facet_str), count)
|
||||
})
|
||||
.collect::<Vec<_>>());
|
||||
assert_eq!(
|
||||
facets,
|
||||
[
|
||||
("/top1/mid0", 50),
|
||||
("/top1/mid1", 50),
|
||||
("/top1/mid2", 50),
|
||||
("/top1/mid3", 50),
|
||||
].iter()
|
||||
.map(|&(facet_str, count)| (String::from(facet_str), count))
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "Tried to add a facet which is a descendant of \
|
||||
an already added facet.")]
|
||||
fn test_misused_facet_collector() {
|
||||
let mut facet_collector = FacetCollector::for_field(Field(0));
|
||||
facet_collector.add_facet(Facet::from("/country"));
|
||||
facet_collector.add_facet(Facet::from("/country/europe"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_non_used_facet_collector() {
|
||||
let mut facet_collector = FacetCollector::for_field(Field(0));
|
||||
facet_collector.add_facet(Facet::from("/country"));
|
||||
facet_collector.add_facet(Facet::from("/countryeurope"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_facet_collector_topk() {
|
||||
let mut schema_builder = SchemaBuilder::new();
|
||||
@@ -576,18 +563,14 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut docs: Vec<Document> = vec![
|
||||
("a", 10),
|
||||
("b", 100),
|
||||
("c", 7),
|
||||
("d", 12),
|
||||
("e", 21)
|
||||
].into_iter()
|
||||
.flat_map(|(c, count)| {
|
||||
let facet = Facet::from(&format!("/facet_{}", c));
|
||||
let doc = doc!(facet_field => facet);
|
||||
iter::repeat(doc).take(count)
|
||||
}).collect();
|
||||
let mut docs: Vec<Document> = vec![("a", 10), ("b", 100), ("c", 7), ("d", 12), ("e", 21)]
|
||||
.into_iter()
|
||||
.flat_map(|(c, count)| {
|
||||
let facet = Facet::from(&format!("/facet_{}", c));
|
||||
let doc = doc!(facet_field => facet);
|
||||
iter::repeat(doc).take(count)
|
||||
})
|
||||
.collect();
|
||||
thread_rng().shuffle(&mut docs[..]);
|
||||
|
||||
let mut index_writer = index.writer(3_000_000).unwrap();
|
||||
@@ -611,8 +594,9 @@ mod tests {
|
||||
vec![
|
||||
(&Facet::from("/facet_b"), 100),
|
||||
(&Facet::from("/facet_e"), 21),
|
||||
(&Facet::from("/facet_d"), 12)
|
||||
]);
|
||||
(&Facet::from("/facet_d"), 12),
|
||||
]
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -623,12 +607,12 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut docs = vec!();
|
||||
let mut docs = vec![];
|
||||
for val in 0..50 {
|
||||
let facet = Facet::from(&format!("/facet_{}", val));
|
||||
for _ in 0..val*val {
|
||||
docs.push(doc!(facet_field=>facet.clone()));
|
||||
}
|
||||
for _ in 0..val * val {
|
||||
docs.push(doc!(facet_field=>facet.clone()));
|
||||
}
|
||||
}
|
||||
// 40425 docs
|
||||
thread_rng().shuffle(&mut docs[..]);
|
||||
@@ -647,4 +631,3 @@ mod tests {
|
||||
});
|
||||
}
|
||||
}
@@ -4,64 +4,35 @@ use common::serialize::BinarySerializable;
|
||||
use std::mem;
|
||||
use std::ops::Deref;
|
||||
|
||||
/// Computes the number of bits that will be used for bitpacking.
|
||||
///
|
||||
/// In general the target is the minimum number of bits
|
||||
/// required to express the amplitude given in argument.
|
||||
///
|
||||
/// e.g. If the amplitude is 10, we can store all ints using only 4 bits.
|
||||
///
|
||||
/// The logic is slightly more convoluted here because, for optimization
/// reasons, we want to ensure that a value spans at most 8 aligned bytes.
|
||||
///
|
||||
/// Spanning over 9 bytes is possible for instance, if we do
|
||||
/// bitpacking with an amplitude of 63 bits.
|
||||
/// In this case, the second int will start on bit
|
||||
/// 63 (which belongs to byte 7) and ends at byte 15;
|
||||
/// Hence 9 bytes (from byte 7 to byte 15 included).
|
||||
///
|
||||
/// To avoid this, we force the number of bits to 64bits
|
||||
/// when the result is greater than `64-8 = 56 bits`.
|
||||
///
|
||||
/// Note that this only affects rare use cases spanning over
|
||||
/// a very large range of values. Even in this case, it results
|
||||
/// in an extra cost of at most 12% compared to the optimal
|
||||
/// number of bits.
|
||||
pub fn compute_num_bits(amplitude: u64) -> u8 {
|
||||
let amplitude = (64u32 - amplitude.leading_zeros()) as u8;
|
||||
if amplitude <= 64 - 8 {
|
||||
amplitude
|
||||
} else {
|
||||
64
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BitPacker {
|
||||
pub(crate) struct BitPacker {
|
||||
mini_buffer: u64,
|
||||
mini_buffer_written: usize,
|
||||
num_bits: usize,
|
||||
}
|
||||
|
||||
impl BitPacker {
|
||||
pub fn new(num_bits: usize) -> BitPacker {
|
||||
pub fn new() -> BitPacker {
|
||||
BitPacker {
|
||||
mini_buffer: 0u64,
|
||||
mini_buffer_written: 0,
|
||||
num_bits,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn write<TWrite: Write>(&mut self, val: u64, output: &mut TWrite) -> io::Result<()> {
|
||||
pub fn write<TWrite: Write>(
|
||||
&mut self,
|
||||
val: u64,
|
||||
num_bits: u8,
|
||||
output: &mut TWrite,
|
||||
) -> io::Result<()> {
|
||||
let val_u64 = val as u64;
|
||||
if self.mini_buffer_written + self.num_bits > 64 {
|
||||
let num_bits = num_bits as usize;
|
||||
if self.mini_buffer_written + num_bits > 64 {
|
||||
self.mini_buffer |= val_u64.wrapping_shl(self.mini_buffer_written as u32);
|
||||
self.mini_buffer.serialize(output)?;
|
||||
self.mini_buffer = val_u64.wrapping_shr((64 - self.mini_buffer_written) as u32);
|
||||
self.mini_buffer_written = self.mini_buffer_written + (self.num_bits as usize) - 64;
|
||||
self.mini_buffer_written = self.mini_buffer_written + num_bits - 64;
|
||||
} else {
|
||||
self.mini_buffer |= val_u64 << self.mini_buffer_written;
|
||||
self.mini_buffer_written += self.num_bits;
|
||||
self.mini_buffer_written += num_bits;
|
||||
if self.mini_buffer_written == 64 {
|
||||
self.mini_buffer.serialize(output)?;
|
||||
self.mini_buffer_written = 0;
|
||||
@@ -71,7 +42,7 @@ impl BitPacker {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn flush<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
|
||||
pub fn flush<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
|
||||
if self.mini_buffer_written > 0 {
|
||||
let num_bytes = (self.mini_buffer_written + 7) / 8;
|
||||
let arr: [u8; 8] = unsafe { mem::transmute::<u64, [u8; 8]>(self.mini_buffer) };
|
||||
@@ -103,14 +74,14 @@ impl<Data> BitUnpacker<Data>
|
||||
where
|
||||
Data: Deref<Target = [u8]>,
|
||||
{
|
||||
pub fn new(data: Data, num_bits: usize) -> BitUnpacker<Data> {
|
||||
pub fn new(data: Data, num_bits: u8) -> BitUnpacker<Data> {
|
||||
let mask: u64 = if num_bits == 64 {
|
||||
!0u64
|
||||
} else {
|
||||
(1u64 << num_bits) - 1u64
|
||||
};
|
||||
BitUnpacker {
|
||||
num_bits,
|
||||
num_bits: num_bits as usize,
|
||||
mask,
|
||||
data,
|
||||
}
|
||||
@@ -147,7 +118,7 @@ where
|
||||
}
|
||||
unsafe { *(buffer[..].as_ptr() as *const u64) }
|
||||
};
|
||||
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
|
||||
let val_shifted = val_unshifted_unmasked >> (bit_shift as u64);
|
||||
(val_shifted & mask)
|
||||
}
|
||||
}
|
||||
@@ -176,37 +147,25 @@ where
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
use super::{BitPacker, BitUnpacker};
|
||||
|
||||
#[test]
|
||||
fn test_compute_num_bits() {
|
||||
assert_eq!(compute_num_bits(1), 1u8);
|
||||
assert_eq!(compute_num_bits(0), 0u8);
|
||||
assert_eq!(compute_num_bits(2), 2u8);
|
||||
assert_eq!(compute_num_bits(3), 2u8);
|
||||
assert_eq!(compute_num_bits(4), 3u8);
|
||||
assert_eq!(compute_num_bits(255), 8u8);
|
||||
assert_eq!(compute_num_bits(256), 9u8);
|
||||
assert_eq!(compute_num_bits(5_000_000_000), 33u8);
|
||||
}
|
||||
|
||||
fn create_fastfield_bitpacker(len: usize, num_bits: usize) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
|
||||
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
|
||||
let mut data = Vec::new();
|
||||
let mut bitpacker = BitPacker::new(num_bits);
|
||||
let max_val: u64 = (1 << num_bits) - 1;
|
||||
let mut bitpacker = BitPacker::new();
|
||||
let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
|
||||
let vals: Vec<u64> = (0u64..len as u64)
|
||||
.map(|i| if max_val == 0 { 0 } else { i % max_val })
|
||||
.collect();
|
||||
for &val in &vals {
|
||||
bitpacker.write(val, &mut data).unwrap();
|
||||
bitpacker.write(val, num_bits, &mut data).unwrap();
|
||||
}
|
||||
bitpacker.close(&mut data).unwrap();
|
||||
assert_eq!(data.len(), (num_bits * len + 7) / 8 + 7);
|
||||
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
|
||||
let bitunpacker = BitUnpacker::new(data, num_bits);
|
||||
(bitunpacker, vals)
|
||||
}
|
||||
|
||||
fn test_bitpacker_util(len: usize, num_bits: usize) {
|
||||
fn test_bitpacker_util(len: usize, num_bits: u8) {
|
||||
let (bitunpacker, vals) = create_fastfield_bitpacker(len, num_bits);
|
||||
for (i, val) in vals.iter().enumerate() {
|
||||
assert_eq!(bitunpacker.get(i), *val);
|
||||
|
||||
@@ -1,190 +1,260 @@
|
||||
use DocId;
|
||||
use std::fmt;
|
||||
|
||||
pub trait TinySet {
|
||||
fn insert(&mut self, b: u32);
|
||||
fn is_empty(&self) -> bool;
|
||||
fn pop_lowest(&mut self) -> Option<u32>;
|
||||
fn remove(&mut self, b: u32);
|
||||
fn lowest(&mut self) -> Option<u32>;
|
||||
#[derive(Clone, Copy, Eq, PartialEq)]
|
||||
pub(crate) struct TinySet(u64);
|
||||
|
||||
/// Update self to represent the
|
||||
/// intersection of its elements and the other
|
||||
/// set given in arguments.
|
||||
fn intersect(&mut self, other: Self);
|
||||
|
||||
/// Returns a `TinySet` that contains all values up
|
||||
/// to limit excluded.
|
||||
///
|
||||
/// The limit is assumed to be strictly lower than 64.
|
||||
fn range_lower(limit: u32) -> u64;
|
||||
|
||||
/// Returns a `TinySet` that contains all values greater
|
||||
/// or equal to the given limit, included. (and up to 63)
|
||||
///
|
||||
/// The limit is assumed to be strictly lower than 64.
|
||||
fn range_greater_or_equal(from_included: u32) -> u64 {
|
||||
assert!(from_included < 64);
|
||||
0 ^ Self::range_lower(from_included)
|
||||
impl fmt::Debug for TinySet {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
self.into_iter().collect::<Vec<u32>>().fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl TinySet for u64 {
|
||||
fn range_lower(from_included: u32) -> u64 {
|
||||
assert!(from_included < 64);
|
||||
(1u64 << (from_included as u64)) - 1u64
|
||||
pub struct TinySetIterator(TinySet);
|
||||
impl Iterator for TinySetIterator {
|
||||
type Item = u32;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.pop_lowest()
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoIterator for TinySet {
|
||||
type Item = u32;
|
||||
type IntoIter = TinySetIterator;
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
TinySetIterator(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl TinySet {
|
||||
/// Returns an empty `TinySet`.
|
||||
pub fn empty() -> TinySet {
|
||||
TinySet(0u64)
|
||||
}
|
||||
|
||||
fn intersect(&mut self, filter_mask: u64) {
|
||||
*self &= filter_mask;
|
||||
/// Returns the complement of the set in `[0, 64[`.
|
||||
fn complement(&self) -> TinySet {
|
||||
TinySet(!self.0)
|
||||
}
|
||||
|
||||
/// Returns true iff the `TinySet` contains the element `el`.
|
||||
pub fn contains(&self, el: u32) -> bool {
|
||||
!self.intersect(TinySet::singleton(el)).is_empty()
|
||||
}
|
||||
|
||||
/// Returns the intersection of `self` and `other`
|
||||
pub fn intersect(&self, other: TinySet) -> TinySet {
|
||||
TinySet(self.0 & other.0)
|
||||
}
|
||||
|
||||
/// Creates a new `TinySet` containing only one element
|
||||
/// within `[0; 64[`
|
||||
#[inline(always)]
|
||||
fn insert(&mut self, b: u32) {
|
||||
*self |= 1u64 << (b as u64);
|
||||
pub fn singleton(el: u32) -> TinySet {
|
||||
TinySet(1u64 << (el as u64))
|
||||
}
|
||||
|
||||
/// Insert a new element within [0..64[
|
||||
#[inline(always)]
|
||||
fn is_empty(&self) -> bool {
|
||||
*self == 0u64
|
||||
pub fn insert(self, el: u32) -> TinySet {
|
||||
self.union(TinySet::singleton(el))
|
||||
}
|
||||
|
||||
/// Insert a new element within [0..64[
|
||||
#[inline(always)]
|
||||
fn pop_lowest(&mut self) -> Option<u32> {
|
||||
pub fn insert_mut(&mut self, el: u32) -> bool {
|
||||
let old = *self;
|
||||
*self = old.insert(el);
|
||||
old != *self
|
||||
}
|
||||
|
||||
/// Returns the union of two tinysets
|
||||
#[inline(always)]
|
||||
pub fn union(self, other: TinySet) -> TinySet {
|
||||
TinySet(self.0 | other.0)
|
||||
}
|
||||
|
||||
/// Returns true iff the `TinySet` is empty.
|
||||
#[inline(always)]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.0 == 0u64
|
||||
}
|
||||
|
||||
/// Returns the lowest element in the `TinySet`
|
||||
/// and removes it.
|
||||
#[inline(always)]
|
||||
pub fn pop_lowest(&mut self) -> Option<u32> {
|
||||
if let Some(lowest) = self.lowest() {
|
||||
self.remove(lowest);
|
||||
self.0 ^= TinySet::singleton(lowest).0;
|
||||
Some(lowest)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the lowest element in the `TinySet`
|
||||
/// (or None if the set is empty).
|
||||
#[inline(always)]
|
||||
fn remove(&mut self, b: u32) {
|
||||
*self ^= 1 << (b as u64);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn lowest(&mut self) -> Option<u32> {
|
||||
pub fn lowest(&mut self) -> Option<u32> {
|
||||
if self.is_empty() {
|
||||
None
|
||||
} else {
|
||||
let least_significant_bit = self.trailing_zeros() as u32;
|
||||
let least_significant_bit = self.0.trailing_zeros() as u32;
|
||||
Some(least_significant_bit)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a `TinySet` that contains all values up
|
||||
/// to limit excluded.
|
||||
///
|
||||
/// The limit is assumed to be strictly lower than 64.
|
||||
pub fn range_lower(upper_bound: u32) -> TinySet {
|
||||
TinySet((1u64 << ((upper_bound % 64u32) as u64)) - 1u64)
|
||||
}
|
||||
|
||||
/// Returns a `TinySet` that contains all values greater
|
||||
/// or equal to the given limit, included. (and up to 63)
|
||||
///
|
||||
/// The limit is assumed to be strictly lower than 64.
|
||||
pub fn range_greater_or_equal(from_included: u32) -> TinySet {
|
||||
TinySet::range_lower(from_included).complement()
|
||||
}
|
||||
}
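Because a `TinySet` is nothing but a 64-bit mask over the values `0..64`, the operations above compose cheaply: `insert` returns a new set, `insert_mut` mutates in place and reports whether the element was new, and iteration pops elements in increasing order. A short in-crate sketch, with the values worked out from the definitions above:

    let s = TinySet::empty().insert(3).insert(10);
    assert!(s.contains(3));
    assert!(!s.contains(4));
    assert_eq!(s.into_iter().collect::<Vec<u32>>(), vec![3, 10]); // lowest first

    let mut m = TinySet::singleton(7);
    assert!(!m.insert_mut(7)); // 7 was already present: the set is unchanged
    assert!(m.insert_mut(8));  // 8 is new

    // range_lower(4) = {0, 1, 2, 3}; its complement is range_greater_or_equal(4).
    assert_eq!(
        TinySet::range_lower(4).into_iter().collect::<Vec<u32>>(),
        vec![0, 1, 2, 3]
    );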
pub struct DocBitSet {
|
||||
tinybitsets: Box<[u64]>,
|
||||
size_hint: usize, //< Technically it should be u32, but we
|
||||
// count multiple inserts.
|
||||
// `usize` guards us from overflow.
|
||||
max_doc: DocId
|
||||
#[derive(Clone)]
|
||||
pub struct BitSet {
|
||||
tinysets: Box<[TinySet]>,
|
||||
len: usize, //< Technically it should be u32, but we
|
||||
// count multiple inserts.
|
||||
// `usize` guards us from overflow.
|
||||
max_value: u32,
|
||||
}
|
||||
|
||||
impl DocBitSet {
|
||||
pub fn with_maxdoc(max_doc: DocId) -> DocBitSet {
|
||||
let num_buckets = (max_doc + 63) / 64;
|
||||
DocBitSet {
|
||||
tinybitsets: vec![0u64; num_buckets as usize].into_boxed_slice(),
|
||||
size_hint: 0,
|
||||
max_doc
|
||||
fn num_buckets(max_val: u32) -> u32 {
|
||||
(max_val + 63u32) / 64u32
|
||||
}
|
||||
|
||||
impl BitSet {
|
||||
/// Create a new `BitSet` that may contain elements
|
||||
/// within `[0, max_val[`.
|
||||
pub fn with_max_value(max_value: u32) -> BitSet {
|
||||
let num_buckets = num_buckets(max_value);
|
||||
let tinybisets = vec![TinySet::empty(); num_buckets as usize].into_boxed_slice();
|
||||
BitSet {
|
||||
tinysets: tinybisets,
|
||||
len: 0,
|
||||
max_value,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn size_hint(&self) -> u32 {
|
||||
if self.max_doc as usize > self.size_hint {
|
||||
self.size_hint as u32
|
||||
} else {
|
||||
self.max_doc
|
||||
/// Removes all elements from the `BitSet`.
|
||||
pub fn clear(&mut self) {
|
||||
for tinyset in self.tinysets.iter_mut() {
|
||||
*tinyset = TinySet::empty();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, doc: DocId) {
|
||||
/// Returns the number of elements in the `BitSet`.
|
||||
pub fn len(&self) -> usize {
|
||||
self.len
|
||||
}
|
||||
|
||||
/// Inserts an element in the `BitSet`
|
||||
pub fn insert(&mut self, el: u32) {
|
||||
// we do not check saturated els.
|
||||
self.size_hint += 1;
|
||||
let bucket = (doc / 64u32) as usize;
|
||||
self.tinybitsets[bucket].insert(doc % 64u32);
|
||||
let higher = el / 64u32;
|
||||
let lower = el % 64u32;
|
||||
self.len += if self.tinysets[higher as usize].insert_mut(lower) {
|
||||
1
|
||||
} else {
|
||||
0
|
||||
};
|
||||
}
|
||||
|
||||
pub fn contains(&self, doc: DocId) -> bool {
|
||||
let tiny_bitset = self.tiny_bitset((doc / 64u32) as usize);
|
||||
let lower = doc % 64;
|
||||
let mask = 1u64 << (lower as u64);
|
||||
(tiny_bitset & mask) != 0u64
|
||||
/// Returns true iff the elements is in the `BitSet`.
|
||||
pub fn contains(&self, el: u32) -> bool {
|
||||
self.tinyset(el / 64u32).contains(el % 64)
|
||||
}
|
||||
|
||||
pub fn max_doc(&self) -> DocId {
|
||||
self.max_doc
|
||||
/// Returns the first non-empty `TinySet` associated to a bucket equal to
/// or greater than `bucket`.
|
||||
///
|
||||
/// Reminder: the tiny set with the bucket `bucket`, represents the
|
||||
/// elements from `bucket * 64` to `(bucket+1) * 64`.
|
||||
pub(crate) fn first_non_empty_bucket(&self, bucket: u32) -> Option<u32> {
|
||||
self.tinysets[bucket as usize..]
|
||||
.iter()
|
||||
.cloned()
|
||||
.position(|tinyset| !tinyset.is_empty())
|
||||
.map(|delta_bucket| bucket + delta_bucket as u32)
|
||||
}
|
||||
|
||||
pub fn num_tiny_bitsets(&self) -> usize {
|
||||
self.tinybitsets.len()
|
||||
pub fn max_value(&self) -> u32 {
|
||||
self.max_value
|
||||
}
|
||||
|
||||
pub fn tiny_bitset(&self, bucket: usize) -> u64 {
|
||||
self.tinybitsets[bucket]
|
||||
/// Returns the tiny bitset representing the
|
||||
/// set restricted to the number range from
|
||||
/// `bucket * 64` to `(bucket + 1) * 64`.
|
||||
pub(crate) fn tinyset(&self, bucket: u32) -> TinySet {
|
||||
self.tinysets[bucket as usize]
|
||||
}
|
||||
}
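In short, element `el` lives in bucket `el / 64` at bit `el % 64`, `len()` tracks the number of distinct insertions, and `first_non_empty_bucket` is what lets the `BitSetDocSet` used in the tests below skip over empty regions. A small behavioural sketch (in-crate, since a few of these methods are `pub(crate)`):

    let mut bitset = BitSet::with_max_value(130); // buckets 0..64, 64..128, 128..130
    assert_eq!(bitset.len(), 0);
    bitset.insert(3);
    bitset.insert(64); // lands in the second bucket
    bitset.insert(3);  // duplicate: len() does not grow
    assert_eq!(bitset.len(), 2);
    assert!(bitset.contains(64));
    assert!(!bitset.contains(65));
    // The first non-empty bucket at or after bucket 1 is bucket 1 itself (it holds 64).
    assert_eq!(bitset.first_non_empty_bucket(1), Some(1));
    bitset.clear();
    assert!(!bitset.contains(3));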
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
extern crate test;
|
||||
use tests;
|
||||
use std::collections::HashSet;
|
||||
use DocId;
|
||||
use super::BitSet;
|
||||
use super::TinySet;
|
||||
use super::DocBitSet;
|
||||
use tests::generate_nonunique_unsorted;
|
||||
use std::collections::BTreeSet;
|
||||
use query::BitSetDocSet;
|
||||
use DocSet;
|
||||
|
||||
#[test]
|
||||
fn test_tiny_set() {
|
||||
assert!(0u64.is_empty());
|
||||
assert!(TinySet::empty().is_empty());
|
||||
{
|
||||
let mut u = 0u64;
|
||||
u.insert(1u32);
|
||||
let mut u = TinySet::empty().insert(1u32);
|
||||
assert_eq!(u.pop_lowest(), Some(1u32));
|
||||
assert!(u.pop_lowest().is_none())
|
||||
}
|
||||
{
|
||||
let mut u = 0u64;
|
||||
u.insert(1u32);
|
||||
u.insert(1u32);
|
||||
let mut u = TinySet::empty().insert(1u32).insert(1u32);
|
||||
assert_eq!(u.pop_lowest(), Some(1u32));
|
||||
assert!(u.pop_lowest().is_none())
|
||||
}
|
||||
{
|
||||
let mut u = 0u64;
|
||||
u.insert(2u32);
|
||||
let mut u = TinySet::empty().insert(2u32);
|
||||
assert_eq!(u.pop_lowest(), Some(2u32));
|
||||
u.insert(1u32);
|
||||
u.insert_mut(1u32);
|
||||
assert_eq!(u.pop_lowest(), Some(1u32));
|
||||
assert!(u.pop_lowest().is_none());
|
||||
}
|
||||
{
|
||||
let mut u = 0u64;
|
||||
u.insert(63u32);
|
||||
let mut u = TinySet::empty().insert(63u32);
|
||||
assert_eq!(u.pop_lowest(), Some(63u32));
|
||||
assert!(u.pop_lowest().is_none());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_docbitset() {
|
||||
// docs are assumed to be lower than 100.
|
||||
let test_against_hashset = |docs: &[DocId], max_doc: u32| {
|
||||
let mut hashset: HashSet<DocId> = HashSet::new();
|
||||
let mut docbitset = DocBitSet::with_maxdoc(max_doc);
|
||||
for &doc in docs {
|
||||
assert!(doc < max_doc);
|
||||
hashset.insert(doc);
|
||||
docbitset.insert(doc);
|
||||
fn test_bitset() {
|
||||
let test_against_hashset = |els: &[u32], max_value: u32| {
|
||||
let mut hashset: HashSet<u32> = HashSet::new();
|
||||
let mut bitset = BitSet::with_max_value(max_value);
|
||||
for &el in els {
|
||||
assert!(el < max_value);
|
||||
hashset.insert(el);
|
||||
bitset.insert(el);
|
||||
}
|
||||
for doc in 0..max_doc {
|
||||
assert_eq!(
|
||||
hashset.contains(&doc),
|
||||
docbitset.contains(doc)
|
||||
);
|
||||
for el in 0..max_value {
|
||||
assert_eq!(hashset.contains(&el), bitset.contains(el));
|
||||
}
|
||||
assert_eq!(docbitset.max_doc(), max_doc);
|
||||
assert_eq!(bitset.max_value(), max_value);
|
||||
};
|
||||
|
||||
test_against_hashset(&[], 0);
|
||||
@@ -194,17 +264,119 @@ mod tests {
|
||||
test_against_hashset(&[1u32, 2u32], 4);
|
||||
test_against_hashset(&[99u32], 100);
|
||||
test_against_hashset(&[63u32], 64);
|
||||
test_against_hashset(&[62u32,63u32], 64);
|
||||
test_against_hashset(&[62u32, 63u32], 64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_docbitset_num_buckets() {
|
||||
assert_eq!(DocBitSet::with_maxdoc(0u32).num_tiny_bitsets(), 0);
|
||||
assert_eq!(DocBitSet::with_maxdoc(1u32).num_tiny_bitsets(), 1);
|
||||
assert_eq!(DocBitSet::with_maxdoc(64u32).num_tiny_bitsets(), 1);
|
||||
assert_eq!(DocBitSet::with_maxdoc(65u32).num_tiny_bitsets(), 2);
|
||||
assert_eq!(DocBitSet::with_maxdoc(128u32).num_tiny_bitsets(), 2);
|
||||
assert_eq!(DocBitSet::with_maxdoc(129u32).num_tiny_bitsets(), 3);
|
||||
fn test_bitset_large() {
|
||||
let arr = generate_nonunique_unsorted(1_000_000, 50_000);
|
||||
let mut btreeset: BTreeSet<u32> = BTreeSet::new();
|
||||
let mut bitset = BitSet::with_max_value(1_000_000);
|
||||
for el in arr {
|
||||
btreeset.insert(el);
|
||||
bitset.insert(el);
|
||||
}
|
||||
for i in 0..1_000_000 {
|
||||
assert_eq!(btreeset.contains(&i), bitset.contains(i));
|
||||
}
|
||||
assert_eq!(btreeset.len(), bitset.len());
|
||||
let mut bitset_docset = BitSetDocSet::from(bitset);
|
||||
for el in btreeset.into_iter() {
|
||||
bitset_docset.advance();
|
||||
assert_eq!(bitset_docset.doc(), el);
|
||||
}
|
||||
assert!(!bitset_docset.advance());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bitset_num_buckets() {
|
||||
use super::num_buckets;
|
||||
assert_eq!(num_buckets(0u32), 0);
|
||||
assert_eq!(num_buckets(1u32), 1);
|
||||
assert_eq!(num_buckets(64u32), 1);
|
||||
assert_eq!(num_buckets(65u32), 2);
|
||||
assert_eq!(num_buckets(128u32), 2);
|
||||
assert_eq!(num_buckets(129u32), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_tinyset_range() {
|
||||
assert_eq!(
|
||||
TinySet::range_lower(3).into_iter().collect::<Vec<u32>>(),
|
||||
[0, 1, 2]
|
||||
);
|
||||
assert!(TinySet::range_lower(0).is_empty());
|
||||
assert_eq!(
|
||||
TinySet::range_lower(63).into_iter().collect::<Vec<u32>>(),
|
||||
(0u32..63u32).collect::<Vec<_>>()
|
||||
);
|
||||
assert_eq!(
|
||||
TinySet::range_lower(1).into_iter().collect::<Vec<u32>>(),
|
||||
[0]
|
||||
);
|
||||
assert_eq!(
|
||||
TinySet::range_lower(2).into_iter().collect::<Vec<u32>>(),
|
||||
[0, 1]
|
||||
);
|
||||
assert_eq!(
|
||||
TinySet::range_greater_or_equal(3)
|
||||
.into_iter()
|
||||
.collect::<Vec<u32>>(),
|
||||
(3u32..64u32).collect::<Vec<_>>()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bitset_len() {
|
||||
let mut bitset = BitSet::with_max_value(1_000);
|
||||
assert_eq!(bitset.len(), 0);
|
||||
bitset.insert(3u32);
|
||||
assert_eq!(bitset.len(), 1);
|
||||
bitset.insert(103u32);
|
||||
assert_eq!(bitset.len(), 2);
|
||||
bitset.insert(3u32);
|
||||
assert_eq!(bitset.len(), 2);
|
||||
bitset.insert(103u32);
|
||||
assert_eq!(bitset.len(), 2);
|
||||
bitset.insert(104u32);
|
||||
assert_eq!(bitset.len(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bitset_clear() {
|
||||
let mut bitset = BitSet::with_max_value(1_000);
|
||||
let els = tests::sample(1_000, 0.01f32);
|
||||
for &el in &els {
|
||||
bitset.insert(el);
|
||||
}
|
||||
assert!(els.iter().all(|el| bitset.contains(*el)));
|
||||
bitset.clear();
|
||||
for el in 0u32..1000u32 {
|
||||
assert!(!bitset.contains(el));
|
||||
}
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_tinyset_pop(b: &mut test::Bencher) {
|
||||
b.iter(|| test::black_box(TinySet::singleton(31u32)).pop_lowest());
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_tinyset_sum(b: &mut test::Bencher) {
|
||||
let tiny_set = TinySet::empty().insert(10u32).insert(14u32).insert(21u32);
|
||||
b.iter(|| {
|
||||
assert_eq!(test::black_box(tiny_set).into_iter().sum::<u32>(), 45u32);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_tinyarr_sum(b: &mut test::Bencher) {
|
||||
let v = [10u32, 14u32, 21u32];
|
||||
b.iter(|| test::black_box(v).iter().cloned().sum::<u32>());
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_bitset_initialize(b: &mut test::Bencher) {
|
||||
b.iter(|| BitSet::with_max_value(1_000_000));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -8,7 +8,6 @@ use std::io::{self, Read};
|
||||
use directory::ReadOnlySource;
|
||||
use common::BinarySerializable;
|
||||
|
||||
|
||||
#[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
|
||||
pub struct FileAddr {
|
||||
field: Field,
|
||||
@@ -19,7 +18,7 @@ impl FileAddr {
|
||||
fn new(field: Field, idx: usize) -> FileAddr {
|
||||
FileAddr {
|
||||
field: field,
|
||||
idx: idx
|
||||
idx: idx,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -36,7 +35,7 @@ impl BinarySerializable for FileAddr {
|
||||
let idx = VInt::deserialize(reader)?.0 as usize;
|
||||
Ok(FileAddr {
|
||||
field: field,
|
||||
idx: idx
|
||||
idx: idx,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -59,7 +58,7 @@ impl<W: Write> CompositeWrite<W> {
|
||||
|
||||
/// Start writing a new field.
|
||||
pub fn for_field(&mut self, field: Field) -> &mut CountingWriter<W> {
|
||||
self.for_field_with_idx(field, 0)
|
||||
self.for_field_with_idx(field, 0)
|
||||
}
|
||||
|
||||
/// Start writing a new field.
|
||||
@@ -71,7 +70,6 @@ impl<W: Write> CompositeWrite<W> {
|
||||
&mut self.write
|
||||
}
|
||||
|
||||
|
||||
/// Close the composite file.
|
||||
///
|
||||
/// An index of the different field offsets
|
||||
@@ -89,9 +87,7 @@ impl<W: Write> CompositeWrite<W> {
|
||||
|
||||
let mut prev_offset = 0;
|
||||
for (offset, file_addr) in offset_fields {
|
||||
VInt((offset - prev_offset) as u64).serialize(
|
||||
&mut self.write,
|
||||
)?;
|
||||
VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
|
||||
file_addr.serialize(&mut self.write)?;
|
||||
prev_offset = offset;
|
||||
}
|
||||
@@ -103,7 +99,6 @@ impl<W: Write> CompositeWrite<W> {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// A composite file is an abstraction to store a
|
||||
/// file partitioned by field.
|
||||
///
|
||||
@@ -174,20 +169,20 @@ impl CompositeFile {
|
||||
/// to a given `Field` and stored in a `CompositeFile`.
|
||||
pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<ReadOnlySource> {
|
||||
self.offsets_index
|
||||
.get(&FileAddr {field: field, idx: idx})
|
||||
.map(|&(from, to)| {
|
||||
self.data.slice(from, to)
|
||||
.get(&FileAddr {
|
||||
field: field,
|
||||
idx: idx,
|
||||
})
|
||||
.map(|&(from, to)| self.data.slice(from, to))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
|
||||
use std::io::Write;
|
||||
use super::{CompositeWrite, CompositeFile};
|
||||
use directory::{RAMDirectory, Directory};
|
||||
use super::{CompositeFile, CompositeWrite};
|
||||
use directory::{Directory, RAMDirectory};
|
||||
use schema::Field;
|
||||
use common::VInt;
|
||||
use common::BinarySerializable;
|
||||
@@ -231,7 +226,6 @@ mod test {
|
||||
assert_eq!(payload_4, 2u64);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -7,18 +7,57 @@ pub mod bitpacker;
|
||||
mod bitset;
|
||||
|
||||
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
|
||||
pub use self::serialize::BinarySerializable;
|
||||
pub use self::serialize::{BinarySerializable, FixedSize};
|
||||
pub use self::timer::Timing;
|
||||
pub use self::timer::TimerTree;
|
||||
pub use self::timer::OpenTimer;
|
||||
pub use self::vint::VInt;
|
||||
pub use self::counting_writer::CountingWriter;
|
||||
pub use self::bitset::{TinySet, DocBitSet};
|
||||
pub use self::bitset::BitSet;
|
||||
pub(crate) use self::bitset::TinySet;
|
||||
pub use byteorder::LittleEndian as Endianness;
|
||||
|
||||
use std::io;
|
||||
|
||||
/// Computes the number of bits that will be used for bitpacking.
|
||||
///
|
||||
/// In general the target is the minimum number of bits
|
||||
/// required to express the amplitude given in argument.
|
||||
///
|
||||
/// e.g. If the amplitude is 10, we can store all ints using only 4 bits.
|
||||
///
|
||||
/// The logic is slightly more convoluted here because, for optimization
/// reasons, we want to ensure that a value spans at most 8 aligned bytes.
|
||||
///
|
||||
/// Spanning over 9 bytes is possible for instance, if we do
|
||||
/// bitpacking with an amplitude of 63 bits.
|
||||
/// In this case, the second int will start on bit
|
||||
/// 63 (which belongs to byte 7) and ends at byte 15;
|
||||
/// Hence 9 bytes (from byte 7 to byte 15 included).
|
||||
///
|
||||
/// To avoid this, we force the number of bits to 64bits
|
||||
/// when the result is greater than `64-8 = 56 bits`.
|
||||
///
|
||||
/// Note that this only affects rare use cases spanning over
|
||||
/// a very large range of values. Even in this case, it results
|
||||
/// in an extra cost of at most 12% compared to the optimal
|
||||
/// number of bits.
|
||||
pub(crate) fn compute_num_bits(n: u64) -> u8 {
|
||||
let amplitude = (64u32 - n.leading_zeros()) as u8;
|
||||
if amplitude <= 64 - 8 {
|
||||
amplitude
|
||||
} else {
|
||||
64
|
||||
}
|
||||
}
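Equivalently, the result is `64 - amplitude.leading_zeros()`, bumped up to 64 once it exceeds 56 so that a packed value never straddles more than 8 bytes. A few worked values; the first three match `test_compute_num_bits` below, the last one is derived from the 56-bit cutoff described above:

    assert_eq!(compute_num_bits(255), 8);               // fits exactly in one byte
    assert_eq!(compute_num_bits(256), 9);               // needs one extra bit
    assert_eq!(compute_num_bits(5_000_000_000), 33);
    assert_eq!(compute_num_bits((1u64 << 57) - 1), 64); // 57 > 56, rounded up to 64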
pub(crate) fn is_power_of_2(n: usize) -> bool {
|
||||
(n > 0) && (n & (n - 1) == 0)
|
||||
}
|
||||
|
||||
/// Create a default io error given a string.
|
||||
pub fn make_io_err(msg: String) -> io::Error {
|
||||
pub(crate) fn make_io_err(msg: String) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::Other, msg)
|
||||
}
|
||||
|
||||
@@ -67,9 +106,10 @@ pub fn u64_to_i64(val: u64) -> i64 {
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
pub(crate) mod test {
|
||||
|
||||
use super::{i64_to_u64, u64_to_i64};
|
||||
use super::{compute_num_bits, i64_to_u64, u64_to_i64};
|
||||
pub use super::serialize::test::fixed_size_test;
|
||||
|
||||
fn test_i64_converter_helper(val: i64) {
|
||||
assert_eq!(u64_to_i64(i64_to_u64(val)), val);
|
||||
@@ -86,4 +126,16 @@ mod test {
|
||||
test_i64_converter_helper(i);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compute_num_bits() {
|
||||
assert_eq!(compute_num_bits(1), 1u8);
|
||||
assert_eq!(compute_num_bits(0), 0u8);
|
||||
assert_eq!(compute_num_bits(2), 2u8);
|
||||
assert_eq!(compute_num_bits(3), 2u8);
|
||||
assert_eq!(compute_num_bits(4), 3u8);
|
||||
assert_eq!(compute_num_bits(255), 8u8);
|
||||
assert_eq!(compute_num_bits(256), 9u8);
|
||||
assert_eq!(compute_num_bits(5_000_000_000), 33u8);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,16 +1,25 @@
|
||||
use byteorder::{ReadBytesExt, WriteBytesExt};
|
||||
use byteorder::LittleEndian as Endianness;
|
||||
use common::Endianness;
|
||||
use std::fmt;
|
||||
use std::io::Write;
|
||||
use std::io::Read;
|
||||
use std::io;
|
||||
use common::VInt;
|
||||
|
||||
/// Trait for a simple binary serialization.
|
||||
pub trait BinarySerializable: fmt::Debug + Sized {
|
||||
/// Serialize
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>;
|
||||
/// Deserialize
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self>;
|
||||
}
|
||||
|
||||
/// `FixedSize` marks a `BinarySerializable` as
|
||||
/// always serializing to the same size.
|
||||
pub trait FixedSize: BinarySerializable {
|
||||
const SIZE_IN_BYTES: usize;
|
||||
}
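Implementing the pair of traits for a small type is mechanical, and the associated constant lets callers size buffers or compute offsets without serializing anything. A sketch for a hypothetical two-field struct; the `Header` type is invented for illustration and is not part of the crate:

    // Hypothetical type, shown only to illustrate `BinarySerializable` + `FixedSize`.
    #[derive(Debug, Default, PartialEq, Eq)]
    struct Header {
        version: u32,
        num_docs: u64,
    }

    impl BinarySerializable for Header {
        fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
            self.version.serialize(writer)?;
            self.num_docs.serialize(writer)
        }
        fn deserialize<R: Read>(reader: &mut R) -> io::Result<Header> {
            let version = u32::deserialize(reader)?;
            let num_docs = u64::deserialize(reader)?;
            Ok(Header { version, num_docs })
        }
    }

    impl FixedSize for Header {
        // 4 bytes for the u32 plus 8 bytes for the u64, on every serialization.
        const SIZE_IN_BYTES: usize =
            <u32 as FixedSize>::SIZE_IN_BYTES + <u64 as FixedSize>::SIZE_IN_BYTES;
    }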
impl BinarySerializable for () {
|
||||
fn serialize<W: Write>(&self, _: &mut W) -> io::Result<()> {
|
||||
Ok(())
|
||||
@@ -20,6 +29,10 @@ impl BinarySerializable for () {
|
||||
}
|
||||
}
|
||||
|
||||
impl FixedSize for () {
|
||||
const SIZE_IN_BYTES: usize = 0;
|
||||
}
|
||||
|
||||
impl<T: BinarySerializable> BinarySerializable for Vec<T> {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.len() as u64).serialize(writer)?;
|
||||
@@ -59,6 +72,10 @@ impl BinarySerializable for u32 {
|
||||
}
|
||||
}
|
||||
|
||||
impl FixedSize for u32 {
|
||||
const SIZE_IN_BYTES: usize = 4;
|
||||
}
|
||||
|
||||
impl BinarySerializable for u64 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u64::<Endianness>(*self)
|
||||
@@ -68,6 +85,10 @@ impl BinarySerializable for u64 {
|
||||
}
|
||||
}
|
||||
|
||||
impl FixedSize for u64 {
|
||||
const SIZE_IN_BYTES: usize = 8;
|
||||
}
|
||||
|
||||
impl BinarySerializable for i64 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_i64::<Endianness>(*self)
|
||||
@@ -77,6 +98,10 @@ impl BinarySerializable for i64 {
|
||||
}
|
||||
}
|
||||
|
||||
impl FixedSize for i64 {
|
||||
const SIZE_IN_BYTES: usize = 8;
|
||||
}
|
||||
|
||||
impl BinarySerializable for u8 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u8(*self)
|
||||
@@ -86,6 +111,10 @@ impl BinarySerializable for u8 {
|
||||
}
|
||||
}
|
||||
|
||||
impl FixedSize for u8 {
|
||||
const SIZE_IN_BYTES: usize = 1;
|
||||
}
|
||||
|
||||
impl BinarySerializable for String {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
let data: &[u8] = self.as_bytes();
|
||||
@@ -104,63 +133,78 @@ impl BinarySerializable for String {
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
pub mod test {
|
||||
|
||||
use common::VInt;
|
||||
use super::*;
|
||||
|
||||
fn serialize_test<T: BinarySerializable + Eq>(v: T, num_bytes: usize) {
|
||||
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
|
||||
let mut buffer = Vec::new();
|
||||
O::default().serialize(&mut buffer).unwrap();
|
||||
assert_eq!(buffer.len(), O::SIZE_IN_BYTES);
|
||||
}
|
||||
|
||||
fn serialize_test<T: BinarySerializable + Eq>(v: T) -> usize {
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
if num_bytes != 0 {
|
||||
v.serialize(&mut buffer).unwrap();
|
||||
assert_eq!(buffer.len(), num_bytes);
|
||||
} else {
|
||||
v.serialize(&mut buffer).unwrap();
|
||||
}
|
||||
v.serialize(&mut buffer).unwrap();
|
||||
let num_bytes = buffer.len();
|
||||
let mut cursor = &buffer[..];
|
||||
let deser = T::deserialize(&mut cursor).unwrap();
|
||||
assert_eq!(deser, v);
|
||||
num_bytes
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_u8() {
|
||||
serialize_test(3u8, 1);
|
||||
serialize_test(5u8, 1);
|
||||
fixed_size_test::<u8>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_u32() {
|
||||
serialize_test(3u32, 4);
|
||||
serialize_test(5u32, 4);
|
||||
serialize_test(u32::max_value(), 4);
|
||||
fixed_size_test::<u32>();
|
||||
assert_eq!(4, serialize_test(3u32));
|
||||
assert_eq!(4, serialize_test(5u32));
|
||||
assert_eq!(4, serialize_test(u32::max_value()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_i64() {
|
||||
fixed_size_test::<i64>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_u64() {
|
||||
fixed_size_test::<u64>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_string() {
|
||||
serialize_test(String::from(""), 1);
|
||||
serialize_test(String::from("ぽよぽよ"), 1 + 3 * 4);
|
||||
serialize_test(String::from("富士さん見える。"), 1 + 3 * 8);
|
||||
assert_eq!(serialize_test(String::from("")), 1);
|
||||
assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4);
|
||||
assert_eq!(
|
||||
serialize_test(String::from("富士さん見える。")),
|
||||
1 + 3 * 8
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_vec() {
|
||||
let v: Vec<u8> = Vec::new();
|
||||
serialize_test(v, 1);
|
||||
serialize_test(vec![1u32, 3u32], 1 + 4 * 2);
|
||||
assert_eq!(serialize_test(Vec::<u8>::new()), 1);
|
||||
assert_eq!(serialize_test(vec![1u32, 3u32]), 1 + 4 * 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_vint() {
|
||||
for i in 0..10_000 {
|
||||
serialize_test(VInt(i as u64), 0);
|
||||
serialize_test(VInt(i as u64));
|
||||
}
|
||||
serialize_test(VInt(7u64), 1);
|
||||
serialize_test(VInt(127u64), 1);
|
||||
serialize_test(VInt(128u64), 2);
|
||||
serialize_test(VInt(129u64), 2);
|
||||
serialize_test(VInt(1234u64), 2);
|
||||
serialize_test(VInt(16_383), 2);
|
||||
serialize_test(VInt(16_384), 3);
|
||||
serialize_test(VInt(u64::max_value()), 10);
|
||||
assert_eq!(serialize_test(VInt(7u64)), 1);
|
||||
assert_eq!(serialize_test(VInt(127u64)), 1);
|
||||
assert_eq!(serialize_test(VInt(128u64)), 2);
|
||||
assert_eq!(serialize_test(VInt(129u64)), 2);
|
||||
assert_eq!(serialize_test(VInt(1234u64)), 2);
|
||||
assert_eq!(serialize_test(VInt(16_383u64)), 2);
|
||||
assert_eq!(serialize_test(VInt(16_384u64)), 3);
|
||||
assert_eq!(serialize_test(VInt(u64::max_value())), 10);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,6 +11,10 @@ impl VInt {
|
||||
pub fn val(&self) -> u64 {
|
||||
self.0
|
||||
}
|
||||
|
||||
pub fn deserialize_u64<R: Read>(reader: &mut R) -> io::Result<u64> {
|
||||
VInt::deserialize(reader).map(|vint| vint.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for VInt {
|
||||
|
||||
@@ -23,9 +23,11 @@ pub fn compress_sorted(vals: &mut [u32], output: &mut [u8], offset: u32) -> usiz
|
||||
let num_bits = compute_num_bits(max_delta as u64);
|
||||
counting_writer.write_all(&[num_bits]).unwrap();
|
||||
|
||||
let mut bit_packer = BitPacker::new(num_bits as usize);
|
||||
let mut bit_packer = BitPacker::new();
|
||||
for val in vals {
|
||||
bit_packer.write(*val as u64, &mut counting_writer).unwrap();
|
||||
bit_packer
|
||||
.write(*val as u64, num_bits, &mut counting_writer)
|
||||
.unwrap();
|
||||
}
|
||||
counting_writer.written_bytes()
|
||||
}
|
||||
@@ -61,13 +63,15 @@ impl BlockEncoder {
|
||||
let num_bits = compute_num_bits(max as u64);
|
||||
let mut counting_writer = CountingWriter::wrap(output);
|
||||
counting_writer.write_all(&[num_bits]).unwrap();
|
||||
let mut bit_packer = BitPacker::new(num_bits as usize);
|
||||
let mut bit_packer = BitPacker::new();
|
||||
for val in vals {
|
||||
bit_packer.write(*val as u64, &mut counting_writer).unwrap();
|
||||
bit_packer
|
||||
.write(*val as u64, num_bits, &mut counting_writer)
|
||||
.unwrap();
|
||||
}
|
||||
for _ in vals.len()..COMPRESSION_BLOCK_SIZE {
|
||||
bit_packer
|
||||
.write(vals[0] as u64, &mut counting_writer)
|
||||
.write(vals[0] as u64, num_bits, &mut counting_writer)
|
||||
.unwrap();
|
||||
}
|
||||
bit_packer.flush(&mut counting_writer).expect(
|
||||
|
||||
@@ -14,7 +14,8 @@ pub struct IndexMeta {
|
||||
pub segments: Vec<SegmentMeta>,
|
||||
pub schema: Schema,
|
||||
pub opstamp: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")] pub payload: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub payload: Option<String>,
|
||||
}
|
||||
|
||||
impl IndexMeta {
|
||||
|
||||
@@ -108,17 +108,21 @@ impl SegmentReader {
|
||||
pub fn facet_reader(&self, field: Field) -> Result<FacetReader> {
|
||||
let field_entry = self.schema.get_field_entry(field);
|
||||
if field_entry.field_type() != &FieldType::HierarchicalFacet {
|
||||
return Err(ErrorKind::InvalidArgument(format!("The field {:?} is not a \
|
||||
hierarchical facet.", field_entry)).into())
|
||||
return Err(ErrorKind::InvalidArgument(format!(
|
||||
"The field {:?} is not a \
|
||||
hierarchical facet.",
|
||||
field_entry
|
||||
)).into());
|
||||
}
|
||||
let term_ords_reader = self.multi_value_reader(field)?;
|
||||
let termdict_source = self.termdict_composite
|
||||
.open_read(field)
|
||||
.ok_or_else(|| {
|
||||
ErrorKind::InvalidArgument(format!("The field \"{}\" is a hierarchical \
|
||||
but this segment does not seem to have the field term \
|
||||
dictionary.", field_entry.name()))
|
||||
})?;
|
||||
let termdict_source = self.termdict_composite.open_read(field).ok_or_else(|| {
|
||||
ErrorKind::InvalidArgument(format!(
|
||||
"The field \"{}\" is a hierarchical \
|
||||
but this segment does not seem to have the field term \
|
||||
dictionary.",
|
||||
field_entry.name()
|
||||
))
|
||||
})?;
|
||||
let termdict = TermDictionaryImpl::from_source(termdict_source);
|
||||
let facet_reader = FacetReader::new(term_ords_reader, termdict);
|
||||
Ok(facet_reader)
|
||||
|
||||
@@ -9,12 +9,12 @@ pub use self::skiplist::SkipList;
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::*;
|
||||
use super::{SkipList, SkipListBuilder};
|
||||
|
||||
#[test]
|
||||
fn test_skiplist() {
|
||||
let mut output: Vec<u8> = Vec::new();
|
||||
let mut skip_list_builder: SkipListBuilder<u32> = SkipListBuilder::new(10);
|
||||
let mut skip_list_builder: SkipListBuilder<u32> = SkipListBuilder::new(8);
|
||||
skip_list_builder.insert(2, &3).unwrap();
|
||||
skip_list_builder.write::<Vec<u8>>(&mut output).unwrap();
|
||||
let mut skip_list: SkipList<u32> = SkipList::from(output.as_slice());
|
||||
@@ -24,7 +24,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_skiplist2() {
|
||||
let mut output: Vec<u8> = Vec::new();
|
||||
let skip_list_builder: SkipListBuilder<u32> = SkipListBuilder::new(10);
|
||||
let skip_list_builder: SkipListBuilder<u32> = SkipListBuilder::new(8);
|
||||
skip_list_builder.write::<Vec<u8>>(&mut output).unwrap();
|
||||
let mut skip_list: SkipList<u32> = SkipList::from(output.as_slice());
|
||||
assert_eq!(skip_list.next(), None);
|
||||
@@ -71,7 +71,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_skiplist5() {
|
||||
let mut output: Vec<u8> = Vec::new();
|
||||
let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(3);
|
||||
let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(4);
|
||||
skip_list_builder.insert(2, &()).unwrap();
|
||||
skip_list_builder.insert(3, &()).unwrap();
|
||||
skip_list_builder.insert(5, &()).unwrap();
|
||||
@@ -103,7 +103,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_skiplist7() {
|
||||
let mut output: Vec<u8> = Vec::new();
|
||||
let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(3);
|
||||
let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(4);
|
||||
for i in 0..1000 {
|
||||
skip_list_builder.insert(i, &()).unwrap();
|
||||
}
|
||||
@@ -121,35 +121,48 @@ mod tests {
|
||||
#[test]
|
||||
fn test_skiplist8() {
|
||||
let mut output: Vec<u8> = Vec::new();
|
||||
let mut skip_list_builder: SkipListBuilder<u32> = SkipListBuilder::new(10);
|
||||
let mut skip_list_builder: SkipListBuilder<u64> = SkipListBuilder::new(8);
|
||||
skip_list_builder.insert(2, &3).unwrap();
|
||||
skip_list_builder.write::<Vec<u8>>(&mut output).unwrap();
|
||||
assert_eq!(output.len(), 13);
|
||||
assert_eq!(output.len(), 11);
|
||||
assert_eq!(output[0], 1u8 + 128u8);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_skiplist9() {
|
||||
let mut output: Vec<u8> = Vec::new();
|
||||
let mut skip_list_builder: SkipListBuilder<u32> = SkipListBuilder::new(3);
|
||||
for i in 0..9 {
|
||||
let mut skip_list_builder: SkipListBuilder<u64> = SkipListBuilder::new(4);
|
||||
for i in 0..4 * 4 * 4 {
|
||||
skip_list_builder.insert(i, &i).unwrap();
|
||||
}
|
||||
skip_list_builder.write::<Vec<u8>>(&mut output).unwrap();
|
||||
assert_eq!(output.len(), 117);
|
||||
assert_eq!(output[0], 3u8 + 128u8);
|
||||
assert_eq!(output.len(), 774);
|
||||
assert_eq!(output[0], 4u8 + 128u8);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_skiplist10() {
|
||||
// checking that void gets serialized to nothing.
|
||||
let mut output: Vec<u8> = Vec::new();
|
||||
let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(3);
|
||||
for i in 0..9 {
|
||||
let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(4);
|
||||
for i in 0..((4 * 4 * 4) - 1) {
|
||||
skip_list_builder.insert(i, &()).unwrap();
|
||||
}
|
||||
skip_list_builder.write::<Vec<u8>>(&mut output).unwrap();
|
||||
assert_eq!(output.len(), 81);
|
||||
assert_eq!(output.len(), 230);
|
||||
assert_eq!(output[0], 128u8 + 3u8);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_skiplist11() {
|
||||
// checking that void gets serialized to nothing.
|
||||
let mut output: Vec<u8> = Vec::new();
|
||||
let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(4);
|
||||
for i in 0..(4 * 4) {
|
||||
skip_list_builder.insert(i, &()).unwrap();
|
||||
}
|
||||
skip_list_builder.write::<Vec<u8>>(&mut output).unwrap();
|
||||
assert_eq!(output.len(), 65);
|
||||
assert_eq!(output[0], 128u8 + 3u8);
|
||||
}
|
||||
|
||||
|
||||
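The test changes above (periods 10 → 8 and 3 → 4) follow from a new requirement, visible in the builder further down, that the skip-list period be a power of two so that a mask test decides when an entry bubbles up to the next layer. A minimal sketch of that check (std-only, illustrative names, not the actual `LayerBuilder`):

```rust
// Every `period`-th insert also emits a skip entry for the layer above.
fn is_power_of_2(n: usize) -> bool {
    n > 0 && (n & (n - 1)) == 0
}

struct Layer {
    period_mask: usize,
    len: usize,
}

impl Layer {
    fn new(period: usize) -> Layer {
        assert!(is_power_of_2(period), "The period has to be a power of 2.");
        Layer { period_mask: period - 1, len: 0 }
    }

    /// True when this insert should also be recorded in the layer above.
    fn insert(&mut self) -> bool {
        self.len += 1;
        (self.len & self.period_mask) == 0
    }
}

fn main() {
    let mut layer = Layer::new(4);
    let bubbled_up = (0..16).filter(|_| layer.insert()).count();
    // With a period of 4, every 4th insert emits a skip entry: 4 out of 16.
    assert_eq!(bubbled_up, 4);
}
```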
@@ -1,6 +1,5 @@
|
||||
use common::BinarySerializable;
|
||||
use common::{BinarySerializable, VInt};
|
||||
use std::marker::PhantomData;
|
||||
use DocId;
|
||||
use std::cmp::max;
|
||||
|
||||
static EMPTY: [u8; 0] = [];
|
||||
@@ -8,21 +7,20 @@ static EMPTY: [u8; 0] = [];
|
||||
struct Layer<'a, T> {
|
||||
data: &'a [u8],
|
||||
cursor: &'a [u8],
|
||||
next_id: DocId,
|
||||
next_id: Option<u64>,
|
||||
_phantom_: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<'a, T: BinarySerializable> Iterator for Layer<'a, T> {
|
||||
type Item = (DocId, T);
|
||||
type Item = (u64, T);
|
||||
|
||||
fn next(&mut self) -> Option<(DocId, T)> {
|
||||
if self.next_id == u32::max_value() {
|
||||
None
|
||||
} else {
|
||||
fn next(&mut self) -> Option<(u64, T)> {
|
||||
if let Some(cur_id) = self.next_id {
|
||||
let cur_val = T::deserialize(&mut self.cursor).unwrap();
|
||||
let cur_id = self.next_id;
|
||||
self.next_id = u32::deserialize(&mut self.cursor).unwrap_or(u32::max_value());
|
||||
self.next_id = VInt::deserialize_u64(&mut self.cursor).ok();
|
||||
Some((cur_id, cur_val))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -30,7 +28,7 @@ impl<'a, T: BinarySerializable> Iterator for Layer<'a, T> {
|
||||
impl<'a, T: BinarySerializable> From<&'a [u8]> for Layer<'a, T> {
|
||||
fn from(data: &'a [u8]) -> Layer<'a, T> {
|
||||
let mut cursor = data;
|
||||
let next_id = u32::deserialize(&mut cursor).unwrap_or(u32::max_value());
|
||||
let next_id = VInt::deserialize_u64(&mut cursor).ok();
|
||||
Layer {
|
||||
data,
|
||||
cursor,
|
||||
@@ -45,14 +43,14 @@ impl<'a, T: BinarySerializable> Layer<'a, T> {
|
||||
Layer {
|
||||
data: &EMPTY,
|
||||
cursor: &EMPTY,
|
||||
next_id: DocId::max_value(),
|
||||
next_id: None,
|
||||
_phantom_: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn seek_offset(&mut self, offset: usize) {
|
||||
self.cursor = &self.data[offset..];
|
||||
self.next_id = u32::deserialize(&mut self.cursor).unwrap_or(u32::max_value());
|
||||
self.next_id = VInt::deserialize_u64(&mut self.cursor).ok();
|
||||
}
|
||||
|
||||
// Returns the last element (key, val)
|
||||
@@ -60,54 +58,61 @@ impl<'a, T: BinarySerializable> Layer<'a, T> {
|
||||
//
|
||||
// If there is no such element anymore,
|
||||
// returns None.
|
||||
fn seek(&mut self, doc_id: DocId) -> Option<(DocId, T)> {
|
||||
let mut val = None;
|
||||
while self.next_id < doc_id {
|
||||
match self.next() {
|
||||
None => {
|
||||
break;
|
||||
}
|
||||
v => {
|
||||
val = v;
|
||||
//
|
||||
// If the element exists, it will be returned
|
||||
// at the next call to `.next()`.
|
||||
fn seek(&mut self, key: u64) -> Option<(u64, T)> {
|
||||
let mut result: Option<(u64, T)> = None;
|
||||
loop {
|
||||
if let Some(next_id) = self.next_id {
|
||||
if next_id < key {
|
||||
if let Some(v) = self.next() {
|
||||
result = Some(v);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
val
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SkipList<'a, T: BinarySerializable> {
|
||||
data_layer: Layer<'a, T>,
|
||||
skip_layers: Vec<Layer<'a, u32>>,
|
||||
skip_layers: Vec<Layer<'a, u64>>,
|
||||
}
|
||||
|
||||
impl<'a, T: BinarySerializable> Iterator for SkipList<'a, T> {
|
||||
type Item = (DocId, T);
|
||||
type Item = (u64, T);
|
||||
|
||||
fn next(&mut self) -> Option<(DocId, T)> {
|
||||
fn next(&mut self) -> Option<(u64, T)> {
|
||||
self.data_layer.next()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: BinarySerializable> SkipList<'a, T> {
|
||||
pub fn seek(&mut self, doc_id: DocId) -> Option<(DocId, T)> {
|
||||
let mut next_layer_skip: Option<(DocId, u32)> = None;
|
||||
pub fn seek(&mut self, key: u64) -> Option<(u64, T)> {
|
||||
let mut next_layer_skip: Option<(u64, u64)> = None;
|
||||
for skip_layer in &mut self.skip_layers {
|
||||
if let Some((_, offset)) = next_layer_skip {
|
||||
skip_layer.seek_offset(offset as usize);
|
||||
}
|
||||
next_layer_skip = skip_layer.seek(doc_id);
|
||||
next_layer_skip = skip_layer.seek(key);
|
||||
}
|
||||
if let Some((_, offset)) = next_layer_skip {
|
||||
self.data_layer.seek_offset(offset as usize);
|
||||
}
|
||||
self.data_layer.seek(doc_id)
|
||||
self.data_layer.seek(key)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: BinarySerializable> From<&'a [u8]> for SkipList<'a, T> {
|
||||
fn from(mut data: &'a [u8]) -> SkipList<'a, T> {
|
||||
let offsets: Vec<u32> = Vec::deserialize(&mut data).unwrap();
|
||||
let offsets: Vec<u64> = Vec::<VInt>::deserialize(&mut data)
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|el| el.0)
|
||||
.collect();
|
||||
let num_layers = offsets.len();
|
||||
let layers_data: &[u8] = data;
|
||||
let data_layer: Layer<'a, T> = if num_layers == 0 {
|
||||
|
||||
@@ -1,13 +1,11 @@
|
||||
use std::io::Write;
|
||||
use common::BinarySerializable;
|
||||
use common::{BinarySerializable, VInt, is_power_of_2};
|
||||
use std::marker::PhantomData;
|
||||
use DocId;
|
||||
use std::io;
|
||||
|
||||
struct LayerBuilder<T: BinarySerializable> {
|
||||
period: usize,
|
||||
period_mask: usize,
|
||||
buffer: Vec<u8>,
|
||||
remaining: usize,
|
||||
len: usize,
|
||||
_phantom_: PhantomData<T>,
|
||||
}
|
||||
@@ -23,34 +21,33 @@ impl<T: BinarySerializable> LayerBuilder<T> {
|
||||
}
|
||||
|
||||
fn with_period(period: usize) -> LayerBuilder<T> {
|
||||
assert!(is_power_of_2(period), "The period has to be a power of 2.");
|
||||
LayerBuilder {
|
||||
period,
|
||||
period_mask: (period - 1),
|
||||
buffer: Vec::new(),
|
||||
remaining: period,
|
||||
len: 0,
|
||||
_phantom_: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn insert(&mut self, doc_id: DocId, value: &T) -> io::Result<Option<(DocId, u32)>> {
|
||||
self.remaining -= 1;
|
||||
fn insert(&mut self, key: u64, value: &T) -> io::Result<Option<(u64, u64)>> {
|
||||
self.len += 1;
|
||||
let offset = self.written_size() as u32;
|
||||
doc_id.serialize(&mut self.buffer)?;
|
||||
let offset = self.written_size() as u64;
|
||||
VInt(key).serialize(&mut self.buffer)?;
|
||||
value.serialize(&mut self.buffer)?;
|
||||
Ok(if self.remaining == 0 {
|
||||
self.remaining = self.period;
|
||||
Some((doc_id, offset))
|
||||
let emit_skip_info = (self.period_mask & self.len) == 0;
|
||||
if emit_skip_info {
|
||||
Ok(Some((key, offset)))
|
||||
} else {
|
||||
None
|
||||
})
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SkipListBuilder<T: BinarySerializable> {
|
||||
period: usize,
|
||||
data_layer: LayerBuilder<T>,
|
||||
skip_layers: Vec<LayerBuilder<u32>>,
|
||||
skip_layers: Vec<LayerBuilder<u64>>,
|
||||
}
|
||||
|
||||
impl<T: BinarySerializable> SkipListBuilder<T> {
|
||||
@@ -62,7 +59,7 @@ impl<T: BinarySerializable> SkipListBuilder<T> {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_skip_layer(&mut self, layer_id: usize) -> &mut LayerBuilder<u32> {
|
||||
fn get_skip_layer(&mut self, layer_id: usize) -> &mut LayerBuilder<u64> {
|
||||
if layer_id == self.skip_layers.len() {
|
||||
let layer_builder = LayerBuilder::with_period(self.period);
|
||||
self.skip_layers.push(layer_builder);
|
||||
@@ -70,9 +67,9 @@ impl<T: BinarySerializable> SkipListBuilder<T> {
|
||||
&mut self.skip_layers[layer_id]
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, doc_id: DocId, dest: &T) -> io::Result<()> {
|
||||
pub fn insert(&mut self, key: u64, dest: &T) -> io::Result<()> {
|
||||
let mut layer_id = 0;
|
||||
let mut skip_pointer = self.data_layer.insert(doc_id, dest)?;
|
||||
let mut skip_pointer = self.data_layer.insert(key, dest)?;
|
||||
loop {
|
||||
skip_pointer = match skip_pointer {
|
||||
Some((skip_doc_id, skip_offset)) => self.get_skip_layer(layer_id)
|
||||
@@ -86,13 +83,11 @@ impl<T: BinarySerializable> SkipListBuilder<T> {
|
||||
}
|
||||
|
||||
pub fn write<W: Write>(self, output: &mut W) -> io::Result<()> {
|
||||
let mut size: u32 = 0;
|
||||
let mut layer_sizes: Vec<u32> = Vec::new();
|
||||
size += self.data_layer.buffer.len() as u32;
|
||||
layer_sizes.push(size);
|
||||
let mut size: u64 = self.data_layer.buffer.len() as u64;
|
||||
let mut layer_sizes = vec![VInt(size)];
|
||||
for layer in self.skip_layers.iter().rev() {
|
||||
size += layer.buffer.len() as u32;
|
||||
layer_sizes.push(size);
|
||||
size += layer.buffer.len() as u64;
|
||||
layer_sizes.push(VInt(size));
|
||||
}
|
||||
layer_sizes.serialize(output)?;
|
||||
self.data_layer.write(output)?;
|
||||
|
||||
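The builder hunks above switch the layer offsets from fixed-width `u32` to `VInt`-encoded `u64`, which is part of what allows file addresses beyond 4 GB. A stand-alone sketch of a stop-bit varint of this general kind (the exact byte layout of tantivy's `VInt` may differ):

```rust
// 7 payload bits per byte; the high bit marks the final byte of a value.
fn vint_serialize(mut val: u64, out: &mut Vec<u8>) {
    loop {
        let b = (val & 0x7f) as u8;
        val >>= 7;
        if val == 0 {
            out.push(b | 0x80); // stop bit
            break;
        }
        out.push(b);
    }
}

/// Returns the decoded value and the number of bytes consumed.
fn vint_deserialize(bytes: &[u8]) -> Option<(u64, usize)> {
    let mut result = 0u64;
    for (i, &b) in bytes.iter().enumerate() {
        result |= u64::from(b & 0x7f) << (7 * i as u32);
        if b & 0x80 != 0 {
            return Some((result, i + 1));
        }
    }
    None
}

fn main() {
    let mut buf = Vec::new();
    vint_serialize(300, &mut buf);
    vint_serialize(7, &mut buf);
    let (first, used) = vint_deserialize(&buf).unwrap();
    let (second, _) = vint_deserialize(&buf[used..]).unwrap();
    // Small offsets cost one or two bytes instead of a fixed four or eight.
    assert_eq!((first, second), (300, 7));
}
```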
@@ -1,7 +1,7 @@
|
||||
use std::iter;
|
||||
use std::mem;
|
||||
use postings::UnorderedTermId;
|
||||
use super::heap::{Heap, HeapAllocable, BytesRef};
|
||||
use super::heap::{BytesRef, Heap, HeapAllocable};
|
||||
|
||||
mod murmurhash2 {
|
||||
|
||||
@@ -10,7 +10,7 @@ mod murmurhash2 {
|
||||
#[inline(always)]
|
||||
pub fn murmurhash2(key: &[u8]) -> u32 {
|
||||
let mut key_ptr: *const u32 = key.as_ptr() as *const u32;
|
||||
let m: u32 = 0x5bd1e995;
|
||||
let m: u32 = 0x5bd1_e995;
|
||||
let r = 24;
|
||||
let len = key.len() as u32;
|
||||
|
||||
@@ -31,18 +31,18 @@ mod murmurhash2 {
|
||||
let key_ptr_u8: *const u8 = key_ptr as *const u8;
|
||||
match remaining {
|
||||
3 => {
|
||||
h ^= unsafe { *key_ptr_u8.wrapping_offset(2) as u32 } << 16;
|
||||
h ^= unsafe { *key_ptr_u8.wrapping_offset(1) as u32 } << 8;
|
||||
h ^= unsafe { *key_ptr_u8 as u32 };
|
||||
h ^= unsafe { u32::from(*key_ptr_u8.wrapping_offset(2)) } << 16;
|
||||
h ^= unsafe { u32::from(*key_ptr_u8.wrapping_offset(1)) } << 8;
|
||||
h ^= unsafe { u32::from(*key_ptr_u8) };
|
||||
h = h.wrapping_mul(m);
|
||||
}
|
||||
2 => {
|
||||
h ^= unsafe { *key_ptr_u8.wrapping_offset(1) as u32 } << 8;
|
||||
h ^= unsafe { *key_ptr_u8 as u32 };
|
||||
h ^= unsafe { u32::from(*key_ptr_u8.wrapping_offset(1)) } << 8;
|
||||
h ^= unsafe { u32::from(*key_ptr_u8) };
|
||||
h = h.wrapping_mul(m);
|
||||
}
|
||||
1 => {
|
||||
h ^= unsafe { *key_ptr_u8 as u32 };
|
||||
h ^= unsafe { u32::from(*key_ptr_u8) };
|
||||
h = h.wrapping_mul(m);
|
||||
}
|
||||
_ => {}
|
||||
@@ -53,9 +53,6 @@ mod murmurhash2 {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/// Split the thread memory budget into
|
||||
/// - the heap size
|
||||
/// - the hash table "table" itself.
|
||||
@@ -63,14 +60,10 @@ mod murmurhash2 {
|
||||
/// Returns (the heap size in bytes, the hash table size in number of bits)
|
||||
pub(crate) fn split_memory(per_thread_memory_budget: usize) -> (usize, usize) {
|
||||
let table_size_limit: usize = per_thread_memory_budget / 3;
|
||||
let compute_table_size = |num_bits: usize| {
|
||||
(1 << num_bits) * mem::size_of::<KeyValue>()
|
||||
};
|
||||
let compute_table_size = |num_bits: usize| (1 << num_bits) * mem::size_of::<KeyValue>();
|
||||
let table_num_bits: usize = (1..)
|
||||
.into_iter()
|
||||
.take_while(|num_bits: &usize| {
|
||||
compute_table_size(*num_bits) < table_size_limit
|
||||
})
|
||||
.take_while(|num_bits: &usize| compute_table_size(*num_bits) < table_size_limit)
|
||||
.last()
|
||||
.expect(&format!(
|
||||
"Per thread memory is too small: {}",
|
||||
@@ -81,7 +74,6 @@ pub(crate) fn split_memory(per_thread_memory_budget: usize) -> (usize, usize) {
|
||||
(heap_size, table_num_bits)
|
||||
}
|
||||
|
||||
|
||||
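A stand-alone version of the split described above, useful for checking the numbers: the hash table is capped at a third of the per-thread budget, and the heap takes the rest. The 8-byte `KEY_VALUE_SIZE` is an assumption, chosen to be consistent with the `split_memory(10_000_000) == (7902848, 18)` expectation in the tests below.

```rust
const KEY_VALUE_SIZE: usize = 8; // assumed size of a KeyValue entry

fn split_memory(per_thread_memory_budget: usize) -> (usize, usize) {
    let table_size_limit = per_thread_memory_budget / 3;
    let compute_table_size = |num_bits: usize| (1usize << num_bits) * KEY_VALUE_SIZE;
    // Largest number of bits whose table still fits under the limit.
    let table_num_bits = (1..)
        .take_while(|&num_bits| compute_table_size(num_bits) < table_size_limit)
        .last()
        .expect("per-thread memory budget too small");
    let heap_size = per_thread_memory_budget - compute_table_size(table_num_bits);
    (heap_size, table_num_bits)
}

fn main() {
    // 2^18 * 8 bytes = 2_097_152 for the table, the remaining 7_902_848 is heap.
    assert_eq!(split_memory(10_000_000), (7_902_848, 18));
}
```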
/// `KeyValue` is the item stored in the hash table.
|
||||
/// The key is actually a `BytesRef` object stored in an external heap.
|
||||
/// The `value_addr` also points to an address in the heap.
|
||||
@@ -101,7 +93,6 @@ impl KeyValue {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Customized `HashMap` with string keys
|
||||
///
|
||||
/// This `HashMap` takes String as keys. Keys are
|
||||
@@ -118,7 +109,6 @@ pub struct TermHashMap<'a> {
|
||||
occupied: Vec<usize>,
|
||||
}
|
||||
|
||||
|
||||
struct QuadraticProbing {
|
||||
hash: usize,
|
||||
i: usize,
|
||||
@@ -141,7 +131,6 @@ impl QuadraticProbing {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<'a> TermHashMap<'a> {
|
||||
pub fn new(num_bucket_power_of_2: usize, heap: &'a Heap) -> TermHashMap<'a> {
|
||||
let table_size = 1 << num_bucket_power_of_2;
|
||||
@@ -178,18 +167,17 @@ impl<'a> TermHashMap<'a> {
|
||||
}
|
||||
|
||||
pub fn iter<'b: 'a>(&'b self) -> impl Iterator<Item = (&'a [u8], u32, UnorderedTermId)> + 'b {
|
||||
self.occupied
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(move |bucket: usize| {
|
||||
let kv = self.table[bucket];
|
||||
let (key, offset) = self.get_key_value(kv.key_value_addr);
|
||||
(key, offset, bucket as UnorderedTermId)
|
||||
})
|
||||
self.occupied.iter().cloned().map(move |bucket: usize| {
|
||||
let kv = self.table[bucket];
|
||||
let (key, offset) = self.get_key_value(kv.key_value_addr);
|
||||
(key, offset, bucket as UnorderedTermId)
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
pub fn get_or_create<S: AsRef<[u8]>, V: HeapAllocable>(&mut self, key: S) -> (UnorderedTermId, &mut V) {
|
||||
pub fn get_or_create<S: AsRef<[u8]>, V: HeapAllocable>(
|
||||
&mut self,
|
||||
key: S,
|
||||
) -> (UnorderedTermId, &mut V) {
|
||||
let key_bytes: &[u8] = key.as_ref();
|
||||
let hash = murmurhash2::murmurhash2(key.as_ref());
|
||||
let mut probe = self.probe(hash);
|
||||
@@ -212,7 +200,6 @@ impl<'a> TermHashMap<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -223,7 +210,6 @@ mod tests {
|
||||
use std::collections::HashSet;
|
||||
use super::split_memory;
|
||||
|
||||
|
||||
struct TestValue {
|
||||
val: u32,
|
||||
_addr: u32,
|
||||
@@ -245,7 +231,6 @@ mod tests {
|
||||
assert_eq!(split_memory(10_000_000), (7902848, 18));
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_hash_map() {
|
||||
let heap = Heap::with_capacity(2_000_000);
|
||||
@@ -319,5 +304,4 @@ mod tests {
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
@@ -39,6 +39,5 @@ fn test_unrolled_linked_list() {
|
||||
assert!(!it.next().is_some());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,19 +20,17 @@ use std::sync::Arc;
|
||||
use std::sync::RwLock;
|
||||
use tempdir::TempDir;
|
||||
|
||||
|
||||
/// Returns None iff the file exists, can be read, but is empty (and hence
|
||||
/// cannot be mmapped).
|
||||
///
|
||||
fn open_mmap(full_path: &PathBuf) -> result::Result<Option<MmapReadOnly>, OpenReadError> {
|
||||
let file = File::open(&full_path)
|
||||
.map_err(|e| {
|
||||
if e.kind() == io::ErrorKind::NotFound {
|
||||
OpenReadError::FileDoesNotExist(full_path.clone())
|
||||
} else {
|
||||
OpenReadError::IOError(IOError::with_path(full_path.to_owned(), e))
|
||||
}
|
||||
})?;
|
||||
fn open_mmap(full_path: &Path) -> result::Result<Option<MmapReadOnly>, OpenReadError> {
|
||||
let file = File::open(full_path).map_err(|e| {
|
||||
if e.kind() == io::ErrorKind::NotFound {
|
||||
OpenReadError::FileDoesNotExist(full_path.to_owned())
|
||||
} else {
|
||||
OpenReadError::IOError(IOError::with_path(full_path.to_owned(), e))
|
||||
}
|
||||
})?;
|
||||
|
||||
let meta_data = file.metadata()
|
||||
.map_err(|e| IOError::with_path(full_path.to_owned(), e))?;
|
||||
@@ -44,9 +42,7 @@ fn open_mmap(full_path: &PathBuf) -> result::Result<Option<MmapReadOnly>, OpenRe
|
||||
}
|
||||
MmapReadOnly::open(&file)
|
||||
.map(Some)
|
||||
.map_err(|e| {
|
||||
From::from(IOError::with_path(full_path.to_owned(), e))
|
||||
})
|
||||
.map_err(|e| From::from(IOError::with_path(full_path.to_owned(), e)))
|
||||
}
|
||||
|
||||
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
|
||||
@@ -79,7 +75,6 @@ impl Default for MmapCache {
|
||||
}
|
||||
|
||||
impl MmapCache {
|
||||
|
||||
/// Removes a `MmapReadOnly` entry from the mmap cache.
|
||||
fn discard_from_cache(&mut self, full_path: &Path) -> bool {
|
||||
self.cache.remove(full_path).is_some()
|
||||
@@ -93,23 +88,23 @@ impl MmapCache {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_mmap(&mut self, full_path: PathBuf) -> Result<Option<MmapReadOnly>, OpenReadError> {
|
||||
Ok(match self.cache.entry(full_path.clone()) {
|
||||
HashMapEntry::Occupied(occupied_entry) => {
|
||||
let mmap = occupied_entry.get();
|
||||
self.counters.hit += 1;
|
||||
Some(mmap.clone())
|
||||
}
|
||||
HashMapEntry::Vacant(vacant_entry) => {
|
||||
self.counters.miss += 1;
|
||||
if let Some(mmap) = open_mmap(&full_path)? {
|
||||
vacant_entry.insert(mmap.clone());
|
||||
Some(mmap)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
})
|
||||
fn get_mmap(&mut self, full_path: &Path) -> Result<Option<MmapReadOnly>, OpenReadError> {
|
||||
Ok(match self.cache.entry(full_path.to_owned()) {
|
||||
HashMapEntry::Occupied(occupied_entry) => {
|
||||
let mmap = occupied_entry.get();
|
||||
self.counters.hit += 1;
|
||||
Some(mmap.clone())
|
||||
}
|
||||
HashMapEntry::Vacant(vacant_entry) => {
|
||||
self.counters.miss += 1;
|
||||
if let Some(mmap) = open_mmap(full_path)? {
|
||||
vacant_entry.insert(mmap.clone());
|
||||
Some(mmap)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -257,9 +252,9 @@ impl Directory for MmapDirectory {
|
||||
})?;
|
||||
|
||||
Ok(mmap_cache
|
||||
.get_mmap(full_path)?
|
||||
.map(ReadOnlySource::Mmap)
|
||||
.unwrap_or_else(|| ReadOnlySource::Anonymous(SharedVecSlice::empty())))
|
||||
.get_mmap(&full_path)?
|
||||
.map(ReadOnlySource::Mmap)
|
||||
.unwrap_or_else(|| ReadOnlySource::Anonymous(SharedVecSlice::empty())))
|
||||
}
|
||||
|
||||
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
||||
@@ -292,20 +287,19 @@ impl Directory for MmapDirectory {
|
||||
Ok(BufWriter::new(Box::new(writer)))
|
||||
}
|
||||
|
||||
|
||||
/// Any entry associated to the path in the mmap will be
|
||||
/// removed before the file is deleted.
|
||||
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
|
||||
debug!("Deleting file {:?}", path);
|
||||
let full_path = self.resolve_path(path);
|
||||
let mut mmap_cache = self.mmap_cache
|
||||
.write()
|
||||
.map_err(|_| {
|
||||
let msg = format!("Failed to acquired write lock \
|
||||
on mmap cache while deleting {:?}",
|
||||
path);
|
||||
IOError::with_path(path.to_owned(), make_io_err(msg))
|
||||
})?;
|
||||
let mut mmap_cache = self.mmap_cache.write().map_err(|_| {
|
||||
let msg = format!(
|
||||
"Failed to acquired write lock \
|
||||
on mmap cache while deleting {:?}",
|
||||
path
|
||||
);
|
||||
IOError::with_path(path.to_owned(), make_io_err(msg))
|
||||
})?;
|
||||
mmap_cache.discard_from_cache(path);
|
||||
|
||||
// Removing the entry in the MMap cache.
|
||||
@@ -415,7 +409,10 @@ mod tests {
|
||||
}
|
||||
for (i, path) in paths.iter().enumerate() {
|
||||
mmap_directory.delete(path).unwrap();
|
||||
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), num_paths - i - 1);
|
||||
assert_eq!(
|
||||
mmap_directory.get_cache_info().mmapped.len(),
|
||||
num_paths - i - 1
|
||||
);
|
||||
}
|
||||
}
|
||||
assert_eq!(mmap_directory.get_cache_info().counters.hit, 10);
|
||||
|
||||
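The `get_mmap` rewrite above keeps the same hit/miss counting around the `HashMap` entry API, only taking `&Path` instead of an owned `PathBuf`. A tiny stand-alone sketch of that caching pattern (a `String` stands in for the real `MmapReadOnly`):

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::path::{Path, PathBuf};

#[derive(Default)]
struct Cache {
    entries: HashMap<PathBuf, String>,
    hits: usize,
    misses: usize,
}

impl Cache {
    fn get(&mut self, full_path: &Path) -> String {
        match self.entries.entry(full_path.to_owned()) {
            Entry::Occupied(occupied) => {
                self.hits += 1;
                occupied.get().clone()
            }
            Entry::Vacant(vacant) => {
                self.misses += 1;
                // In the real code this is where the file is opened and mmapped.
                vacant.insert(format!("mmap of {}", full_path.display())).clone()
            }
        }
    }
}

fn main() {
    let mut cache = Cache::default();
    cache.get(Path::new("meta.json"));
    cache.get(Path::new("meta.json"));
    assert_eq!((cache.hits, cache.misses), (1, 1));
}
```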
@@ -4,7 +4,7 @@ use super::shared_vec_slice::SharedVecSlice;
use common::HasLen;
use std::slice;
use std::io::{self, Read};
use stable_deref_trait::{StableDeref, CloneStableDeref};
use stable_deref_trait::{CloneStableDeref, StableDeref};

/// Read object that represents files in tantivy.
///

@@ -4,7 +4,6 @@ use termdict::TermOrdinal;
use schema::Facet;
use termdict::{TermDictionary, TermDictionaryImpl};

/// The facet reader makes it possible to access the list of
/// facets associated to a given document in a specific
/// segment.
@@ -24,7 +23,6 @@ pub struct FacetReader {
}

impl FacetReader {

/// Creates a new `FacetReader`.
///
/// A facet reader just wraps :
@@ -32,8 +30,10 @@ impl FacetReader {
/// access the list of facet ords for a given document.
/// - a `TermDictionaryImpl` that helps associating a facet to
/// an ordinal and vice versa.
pub fn new(term_ords: MultiValueIntFastFieldReader,
term_dict: TermDictionaryImpl) -> FacetReader {
pub fn new(
term_ords: MultiValueIntFastFieldReader,
term_dict: TermDictionaryImpl,
) -> FacetReader {
FacetReader {
term_ords: term_ords,
term_dict: term_dict,
@@ -56,7 +56,8 @@ impl FacetReader {

/// Given a term ordinal returns the term associated to it.
pub fn facet_from_ord(&self, facet_ord: TermOrdinal, output: &mut Facet) {
let found_term = self.term_dict.ord_to_term(facet_ord as u64, output.inner_buffer_mut());
let found_term = self.term_dict
.ord_to_term(facet_ord as u64, output.inner_buffer_mut());
assert!(found_term, "Term ordinal {} no found.", facet_ord);
}

@@ -64,4 +65,4 @@ impl FacetReader {
pub fn facet_ords(&mut self, doc: DocId, output: &mut Vec<u64>) {
self.term_ords.get_vals(doc, output);
}
}
}
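For context on `facet_ords`: a multi-valued fast field is read through two columns, an offsets column and a flat column of term ordinals. A stand-alone sketch of that indirection (illustrative layout, not the actual on-disk format); the numbers mirror the `assert_eq!(&vals[..], &[4])` check in the multi-value reader test further down:

```rust
// The ordinals of document `doc` live in vals[idx[doc] .. idx[doc + 1]].
fn get_vals(idx: &[u64], vals: &[u64], doc: usize, output: &mut Vec<u64>) {
    let start = idx[doc] as usize;
    let stop = idx[doc + 1] as usize;
    output.clear();
    output.extend_from_slice(&vals[start..stop]);
}

fn main() {
    // Three documents: doc 0 has ordinals [2, 3], doc 1 has none, doc 2 has [4].
    let idx = [0u64, 2, 2, 3];
    let vals = [2u64, 3, 4];
    let mut output = Vec::new();
    get_vals(&idx, &vals, 2, &mut output);
    assert_eq!(&output[..], &[4]);
}
```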
@@ -95,7 +95,9 @@ mod tests {
|
||||
add_single_field_doc(&mut fast_field_writers, *FIELD, 13u64);
|
||||
add_single_field_doc(&mut fast_field_writers, *FIELD, 14u64);
|
||||
add_single_field_doc(&mut fast_field_writers, *FIELD, 2u64);
|
||||
fast_field_writers.serialize(&mut serializer, HashMap::new()).unwrap();
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
@@ -129,7 +131,9 @@ mod tests {
|
||||
add_single_field_doc(&mut fast_field_writers, *FIELD, 1_002u64);
|
||||
add_single_field_doc(&mut fast_field_writers, *FIELD, 1_501u64);
|
||||
add_single_field_doc(&mut fast_field_writers, *FIELD, 215u64);
|
||||
fast_field_writers.serialize(&mut serializer, HashMap::new()).unwrap();
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
@@ -164,7 +168,9 @@ mod tests {
|
||||
for _ in 0..10_000 {
|
||||
add_single_field_doc(&mut fast_field_writers, *FIELD, 100_000u64);
|
||||
}
|
||||
fast_field_writers.serialize(&mut serializer, HashMap::new()).unwrap();
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
@@ -199,7 +205,9 @@ mod tests {
|
||||
5_000_000_000_000_000_000u64 + i,
|
||||
);
|
||||
}
|
||||
fast_field_writers.serialize(&mut serializer, HashMap::new()).unwrap();
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
@@ -238,7 +246,9 @@ mod tests {
|
||||
doc.add_i64(i64_field, i);
|
||||
fast_field_writers.add_document(&doc);
|
||||
}
|
||||
fast_field_writers.serialize(&mut serializer, HashMap::new()).unwrap();
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
@@ -277,7 +287,9 @@ mod tests {
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
|
||||
let doc = Document::default();
|
||||
fast_field_writers.add_document(&doc);
|
||||
fast_field_writers.serialize(&mut serializer, HashMap::new()).unwrap();
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
|
||||
@@ -311,7 +323,9 @@ mod tests {
|
||||
for x in &permutation {
|
||||
add_single_field_doc(&mut fast_field_writers, *FIELD, *x);
|
||||
}
|
||||
fast_field_writers.serialize(&mut serializer, HashMap::new()).unwrap();
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
@@ -366,7 +380,9 @@ mod tests {
|
||||
for x in &permutation {
|
||||
add_single_field_doc(&mut fast_field_writers, *FIELD, *x);
|
||||
}
|
||||
fast_field_writers.serialize(&mut serializer, HashMap::new()).unwrap();
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
@@ -398,7 +414,9 @@ mod tests {
|
||||
for x in &permutation {
|
||||
add_single_field_doc(&mut fast_field_writers, *FIELD, *x);
|
||||
}
|
||||
fast_field_writers.serialize(&mut serializer, HashMap::new()).unwrap();
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
|
||||
@@ -2,4 +2,4 @@ mod writer;
|
||||
mod reader;
|
||||
|
||||
pub use self::writer::MultiValueIntFastFieldWriter;
|
||||
pub use self::reader::MultiValueIntFastFieldReader;
|
||||
pub use self::reader::MultiValueIntFastFieldReader;
|
||||
|
||||
@@ -18,8 +18,10 @@ pub struct MultiValueIntFastFieldReader {
|
||||
}
|
||||
|
||||
impl MultiValueIntFastFieldReader {
|
||||
|
||||
pub(crate) fn open(idx_reader: U64FastFieldReader, vals_reader: U64FastFieldReader) -> MultiValueIntFastFieldReader {
|
||||
pub(crate) fn open(
|
||||
idx_reader: U64FastFieldReader,
|
||||
vals_reader: U64FastFieldReader,
|
||||
) -> MultiValueIntFastFieldReader {
|
||||
MultiValueIntFastFieldReader {
|
||||
idx_reader: idx_reader,
|
||||
vals_reader: vals_reader,
|
||||
@@ -38,12 +40,11 @@ impl MultiValueIntFastFieldReader {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use core::Index;
|
||||
use schema::{Facet, Document, SchemaBuilder};
|
||||
use schema::{Document, Facet, SchemaBuilder};
|
||||
|
||||
#[test]
|
||||
fn test_multifastfield_reader() {
|
||||
@@ -51,7 +52,9 @@ mod tests {
|
||||
let facet_field = schema_builder.add_facet_field("facets");
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_with_num_threads(1, 30_000_000).expect("Failed to create index writer.");
|
||||
let mut index_writer = index
|
||||
.writer_with_num_threads(1, 30_000_000)
|
||||
.expect("Failed to create index writer.");
|
||||
{
|
||||
let mut doc = Document::new();
|
||||
doc.add_facet(facet_field, "/category/cat2");
|
||||
@@ -72,9 +75,7 @@ mod tests {
|
||||
index.load_searchers().expect("Reloading searchers");
|
||||
let searcher = index.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let mut facet_reader = segment_reader
|
||||
.facet_reader(facet_field)
|
||||
.unwrap();
|
||||
let mut facet_reader = segment_reader.facet_reader(facet_field).unwrap();
|
||||
|
||||
let mut facet = Facet::root();
|
||||
{
|
||||
@@ -108,7 +109,5 @@ mod tests {
|
||||
facet_reader.facet_ords(2, &mut vals);
|
||||
assert_eq!(&vals[..], &[4]);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,10 +38,15 @@ impl MultiValueIntFastFieldWriter {
|
||||
}
|
||||
|
||||
/// Push the fast fields value to the `FastFieldWriter`.
|
||||
pub fn serialize(&self, serializer: &mut FastFieldSerializer, mapping: &HashMap<UnorderedTermId, usize>) -> io::Result<()> {
|
||||
pub fn serialize(
|
||||
&self,
|
||||
serializer: &mut FastFieldSerializer,
|
||||
mapping: &HashMap<UnorderedTermId, usize>,
|
||||
) -> io::Result<()> {
|
||||
{
|
||||
// writing the offset index
|
||||
let mut doc_index_serializer = serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
|
||||
let mut doc_index_serializer =
|
||||
serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
|
||||
for &offset in &self.doc_index {
|
||||
doc_index_serializer.add_val(offset)?;
|
||||
}
|
||||
@@ -50,13 +55,13 @@ impl MultiValueIntFastFieldWriter {
|
||||
}
|
||||
{
|
||||
// writing the values themselves.
|
||||
let mut value_serializer = serializer.new_u64_fast_field_with_idx(self.field, 0u64, mapping.len() as u64, 1)?;
|
||||
let mut value_serializer =
|
||||
serializer.new_u64_fast_field_with_idx(self.field, 0u64, mapping.len() as u64, 1)?;
|
||||
for val in &self.vals {
|
||||
value_serializer.add_val(*mapping.get(val).expect("Missing term ordinal") as u64)?;
|
||||
}
|
||||
value_serializer.close_field()?;
|
||||
}
|
||||
Ok(())
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
use directory::ReadOnlySource;
|
||||
use common::{self, BinarySerializable};
|
||||
use common::bitpacker::{compute_num_bits, BitUnpacker};
|
||||
use common::compute_num_bits;
|
||||
use common::bitpacker::BitUnpacker;
|
||||
use DocId;
|
||||
use schema::SchemaBuilder;
|
||||
use std::path::Path;
|
||||
use schema::FAST;
|
||||
use directory::{WritePtr, RAMDirectory, Directory};
|
||||
use directory::{Directory, RAMDirectory, WritePtr};
|
||||
use fastfield::{FastFieldSerializer, FastFieldsWriter};
|
||||
use schema::FieldType;
|
||||
use std::mem;
|
||||
@@ -88,7 +89,7 @@ impl FastFieldReader for U64FastFieldReader {
|
||||
fn is_enabled(field_type: &FieldType) -> bool {
|
||||
match *field_type {
|
||||
FieldType::U64(ref integer_options) => integer_options.is_fast(),
|
||||
FieldType::HierarchicalFacet => { true },
|
||||
FieldType::HierarchicalFacet => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
@@ -113,21 +114,19 @@ impl FastFieldReader for U64FastFieldReader {
|
||||
u64::deserialize(&mut cursor).expect("Failed to read the min_value of fast field.");
|
||||
amplitude =
|
||||
u64::deserialize(&mut cursor).expect("Failed to read the amplitude of fast field.");
|
||||
|
||||
}
|
||||
let max_value = min_value + amplitude;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let owning_ref = OwningRef::new(data).map(|data| &data[16..]);
|
||||
let bit_unpacker = BitUnpacker::new(owning_ref, num_bits as usize);
|
||||
let bit_unpacker = BitUnpacker::new(owning_ref, num_bits);
|
||||
U64FastFieldReader {
|
||||
min_value: min_value,
|
||||
max_value: max_value,
|
||||
bit_unpacker: bit_unpacker,
|
||||
min_value,
|
||||
max_value,
|
||||
bit_unpacker,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl From<Vec<u64>> for U64FastFieldReader {
|
||||
fn from(vals: Vec<u64>) -> U64FastFieldReader {
|
||||
let mut schema_builder = SchemaBuilder::default();
|
||||
@@ -136,22 +135,23 @@ impl From<Vec<u64>> for U64FastFieldReader {
|
||||
let path = Path::new("__dummy__");
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(path).expect(
|
||||
"With a RAMDirectory, this should never fail.",
|
||||
);
|
||||
let mut serializer = FastFieldSerializer::from_write(write).expect(
|
||||
"With a RAMDirectory, this should never fail.",
|
||||
);
|
||||
let write: WritePtr = directory
|
||||
.open_write(path)
|
||||
.expect("With a RAMDirectory, this should never fail.");
|
||||
let mut serializer = FastFieldSerializer::from_write(write)
|
||||
.expect("With a RAMDirectory, this should never fail.");
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
|
||||
{
|
||||
let fast_field_writer = fast_field_writers.get_field_writer(field).expect(
|
||||
"With a RAMDirectory, this should never fail.",
|
||||
);
|
||||
let fast_field_writer = fast_field_writers
|
||||
.get_field_writer(field)
|
||||
.expect("With a RAMDirectory, this should never fail.");
|
||||
for val in vals {
|
||||
fast_field_writer.add_val(val);
|
||||
}
|
||||
}
|
||||
fast_field_writers.serialize(&mut serializer, HashMap::new()).unwrap();
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
|
||||
@@ -159,9 +159,9 @@ impl From<Vec<u64>> for U64FastFieldReader {
|
||||
let composite_file =
|
||||
CompositeFile::open(&source).expect("Failed to read the composite file");
|
||||
|
||||
let field_source = composite_file.open_read(field).expect(
|
||||
"File component not found",
|
||||
);
|
||||
let field_source = composite_file
|
||||
.open_read(field)
|
||||
.expect("File component not found");
|
||||
U64FastFieldReader::open(field_source)
|
||||
}
|
||||
}
|
||||
@@ -222,7 +222,9 @@ impl FastFieldReader for I64FastFieldReader {
|
||||
/// # Panics
|
||||
/// Panics if the data is corrupted.
|
||||
fn open(data: ReadOnlySource) -> I64FastFieldReader {
|
||||
I64FastFieldReader { underlying: U64FastFieldReader::open(data) }
|
||||
I64FastFieldReader {
|
||||
underlying: U64FastFieldReader::open(data),
|
||||
}
|
||||
}
|
||||
|
||||
fn is_enabled(field_type: &FieldType) -> bool {
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
use common::BinarySerializable;
|
||||
use directory::WritePtr;
|
||||
use schema::Field;
|
||||
use common::bitpacker::{compute_num_bits, BitPacker};
|
||||
use common::bitpacker::BitPacker;
|
||||
use common::compute_num_bits;
|
||||
use common::CountingWriter;
|
||||
use common::CompositeWrite;
|
||||
use std::io::{self, Write};
|
||||
@@ -35,7 +36,9 @@ impl FastFieldSerializer {
|
||||
pub fn from_write(write: WritePtr) -> io::Result<FastFieldSerializer> {
|
||||
// just making room for the pointer to header.
|
||||
let composite_write = CompositeWrite::wrap(write);
|
||||
Ok(FastFieldSerializer { composite_write: composite_write })
|
||||
Ok(FastFieldSerializer {
|
||||
composite_write: composite_write,
|
||||
})
|
||||
}
|
||||
|
||||
/// Start serializing a new u64 fast field
|
||||
@@ -54,12 +57,12 @@ impl FastFieldSerializer {
|
||||
field: Field,
|
||||
min_value: u64,
|
||||
max_value: u64,
|
||||
idx: usize) -> io::Result<FastSingleFieldSerializer<CountingWriter<WritePtr>>> {
|
||||
idx: usize,
|
||||
) -> io::Result<FastSingleFieldSerializer<CountingWriter<WritePtr>>> {
|
||||
let field_write = self.composite_write.for_field_with_idx(field, idx);
|
||||
FastSingleFieldSerializer::open(field_write, min_value, max_value)
|
||||
}
|
||||
|
||||
|
||||
/// Closes the serializer
|
||||
///
|
||||
/// After this call the data must be persistently save on disk.
|
||||
@@ -72,6 +75,7 @@ pub struct FastSingleFieldSerializer<'a, W: Write + 'a> {
|
||||
bit_packer: BitPacker,
|
||||
write: &'a mut W,
|
||||
min_value: u64,
|
||||
num_bits: u8,
|
||||
}
|
||||
|
||||
impl<'a, W: Write> FastSingleFieldSerializer<'a, W> {
|
||||
@@ -84,18 +88,20 @@ impl<'a, W: Write> FastSingleFieldSerializer<'a, W> {
|
||||
let amplitude = max_value - min_value;
|
||||
amplitude.serialize(write)?;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let bit_packer = BitPacker::new(num_bits as usize);
|
||||
let bit_packer = BitPacker::new();
|
||||
Ok(FastSingleFieldSerializer {
|
||||
write: write,
|
||||
bit_packer: bit_packer,
|
||||
min_value: min_value,
|
||||
write,
|
||||
bit_packer,
|
||||
min_value,
|
||||
num_bits,
|
||||
})
|
||||
}
|
||||
|
||||
/// Pushes a new value to the currently open u64 fast field.
|
||||
pub fn add_val(&mut self, val: u64) -> io::Result<()> {
|
||||
let val_to_write: u64 = val - self.min_value;
|
||||
self.bit_packer.write(val_to_write, &mut self.write)?;
|
||||
self.bit_packer
|
||||
.write(val_to_write, self.num_bits, &mut self.write)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
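The serializer above stores `min_value` and `amplitude`, then bit-packs each value as a delta from `min_value` using `compute_num_bits(amplitude)` bits. A stand-alone sketch of that arithmetic, reusing the 1_002 / 1_501 / 215 values from the fast-field tests earlier in this diff:

```rust
// Number of bits needed to represent `amplitude` (0 needs 0 bits).
fn compute_num_bits(amplitude: u64) -> u8 {
    (64 - amplitude.leading_zeros()) as u8
}

fn main() {
    let vals = [1_002u64, 1_501, 215];
    let min_value = *vals.iter().min().unwrap();
    let max_value = *vals.iter().max().unwrap();
    let amplitude = max_value - min_value;
    let num_bits = compute_num_bits(amplitude);
    // 1_501 - 215 = 1_286 fits in 11 bits, so each value costs 11 bits
    // instead of 64, plus a fixed header for `min_value` and `amplitude`.
    assert_eq!(num_bits, 11);
    let deltas: Vec<u64> = vals.iter().map(|&val| val - min_value).collect();
    assert_eq!(deltas, vec![787, 1_286, 0]);
}
```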
@@ -1,4 +1,4 @@
|
||||
use schema::{Schema, Field, Document, Cardinality};
|
||||
use schema::{Cardinality, Document, Field, Schema};
|
||||
use fastfield::FastFieldSerializer;
|
||||
use std::io;
|
||||
use schema::Value;
|
||||
@@ -25,12 +25,11 @@ impl FastFieldsWriter {
|
||||
|
||||
for (field_id, field_entry) in schema.fields().iter().enumerate() {
|
||||
let field = Field(field_id as u32);
|
||||
let default_value =
|
||||
if let FieldType::I64(_) = *field_entry.field_type() {
|
||||
common::i64_to_u64(0i64)
|
||||
} else {
|
||||
0u64
|
||||
};
|
||||
let default_value = if let FieldType::I64(_) = *field_entry.field_type() {
|
||||
common::i64_to_u64(0i64)
|
||||
} else {
|
||||
0u64
|
||||
};
|
||||
match *field_entry.field_type() {
|
||||
FieldType::I64(ref int_options) | FieldType::U64(ref int_options) => {
|
||||
match int_options.get_fastfield_cardinality() {
|
||||
@@ -50,7 +49,7 @@ impl FastFieldsWriter {
|
||||
let fast_field_writer = MultiValueIntFastFieldWriter::new(field);
|
||||
multi_values_writers.push(fast_field_writer);
|
||||
}
|
||||
_ => {},
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
FastFieldsWriter {
|
||||
@@ -64,7 +63,7 @@ impl FastFieldsWriter {
|
||||
pub(crate) fn new(fields: Vec<Field>) -> FastFieldsWriter {
|
||||
FastFieldsWriter {
|
||||
single_value_writers: fields.into_iter().map(IntFastFieldWriter::new).collect(),
|
||||
multi_values_writers: vec!(),
|
||||
multi_values_writers: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,23 +72,22 @@ impl FastFieldsWriter {
|
||||
// TODO optimize
|
||||
self.single_value_writers
|
||||
.iter_mut()
|
||||
.find(|field_writer| {
|
||||
field_writer.field() == field
|
||||
})
|
||||
.find(|field_writer| field_writer.field() == field)
|
||||
}
|
||||
|
||||
/// Returns the fast field multi-value writer for the given field.
|
||||
///
|
||||
/// Returns None if the field does not exist, or is not
|
||||
/// configured as a multivalued fastfield in the schema.
|
||||
pub(crate) fn get_multivalue_writer(&mut self, field: Field) -> Option<&mut MultiValueIntFastFieldWriter> {
|
||||
pub(crate) fn get_multivalue_writer(
|
||||
&mut self,
|
||||
field: Field,
|
||||
) -> Option<&mut MultiValueIntFastFieldWriter> {
|
||||
// TODO optimize
|
||||
// TODO expose for users
|
||||
self.multi_values_writers
|
||||
.iter_mut()
|
||||
.find(|multivalue_writer| {
|
||||
multivalue_writer.field() == field
|
||||
})
|
||||
.find(|multivalue_writer| multivalue_writer.field() == field)
|
||||
}
|
||||
|
||||
/// Indexes all of the fastfields of a new document.
|
||||
@@ -104,9 +102,11 @@ impl FastFieldsWriter {
|
||||
|
||||
/// Serializes all of the `FastFieldWriter`s by pushing them in
|
||||
/// order to the fast field serializer.
|
||||
pub fn serialize(&self,
|
||||
serializer: &mut FastFieldSerializer,
|
||||
mapping: HashMap<Field, HashMap<UnorderedTermId, usize>>) -> io::Result<()> {
|
||||
pub fn serialize(
|
||||
&self,
|
||||
serializer: &mut FastFieldSerializer,
|
||||
mapping: &HashMap<Field, HashMap<UnorderedTermId, usize>>,
|
||||
) -> io::Result<()> {
|
||||
for field_writer in &self.single_value_writers {
|
||||
field_writer.serialize(serializer)?;
|
||||
}
|
||||
@@ -201,9 +201,9 @@ impl IntFastFieldWriter {
|
||||
/// associated to the document with the `DocId` n.
|
||||
/// (Well, `n-1` actually because of 0-indexing)
|
||||
pub fn add_val(&mut self, val: u64) {
|
||||
VInt(val).serialize(&mut self.vals).expect(
|
||||
"unable to serialize VInt to Vec",
|
||||
);
|
||||
VInt(val)
|
||||
.serialize(&mut self.vals)
|
||||
.expect("unable to serialize VInt to Vec");
|
||||
|
||||
if val > self.val_max {
|
||||
self.val_max = val;
|
||||
@@ -215,7 +215,6 @@ impl IntFastFieldWriter {
|
||||
self.val_count += 1;
|
||||
}
|
||||
|
||||
|
||||
/// Extract the value associated to the fast field for
|
||||
/// this document.
|
||||
///
|
||||
@@ -228,13 +227,11 @@ impl IntFastFieldWriter {
|
||||
/// only the first one is taken in account.
|
||||
fn extract_val(&self, doc: &Document) -> u64 {
|
||||
match doc.get_first(self.field) {
|
||||
Some(v) => {
|
||||
match *v {
|
||||
Value::U64(ref val) => *val,
|
||||
Value::I64(ref val) => common::i64_to_u64(*val),
|
||||
_ => panic!("Expected a u64field, got {:?} ", v),
|
||||
}
|
||||
}
|
||||
Some(v) => match *v {
|
||||
Value::U64(ref val) => *val,
|
||||
Value::I64(ref val) => common::i64_to_u64(*val),
|
||||
_ => panic!("Expected a u64field, got {:?} ", v),
|
||||
},
|
||||
None => self.val_if_missing,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,7 +23,6 @@ use indexer::SegmentWriter;
|
||||
use postings::DocSet;
|
||||
use schema::IndexRecordOption;
|
||||
use schema::Document;
|
||||
use schema::Schema;
|
||||
use schema::Term;
|
||||
use std::mem;
|
||||
use std::mem::swap;
|
||||
@@ -250,17 +249,18 @@ fn index_documents(
|
||||
heap: &mut Heap,
|
||||
table_size: usize,
|
||||
segment: &Segment,
|
||||
schema: &Schema,
|
||||
generation: usize,
|
||||
document_iterator: &mut Iterator<Item = AddOperation>,
|
||||
segment_updater: &mut SegmentUpdater,
|
||||
mut delete_cursor: DeleteCursor,
|
||||
) -> Result<bool> {
|
||||
heap.clear();
|
||||
let schema = segment.schema();
|
||||
let segment_id = segment.id();
|
||||
let mut segment_writer = SegmentWriter::for_segment(heap, table_size, segment.clone(), schema)?;
|
||||
let mut segment_writer =
|
||||
SegmentWriter::for_segment(heap, table_size, segment.clone(), &schema)?;
|
||||
for doc in document_iterator {
|
||||
segment_writer.add_document(doc, schema)?;
|
||||
segment_writer.add_document(doc, &schema)?;
|
||||
// There is two possible conditions to close the segment.
|
||||
// One is the memory arena dedicated to the segment is
|
||||
// getting full.
|
||||
@@ -368,7 +368,6 @@ impl IndexWriter {
|
||||
/// The thread consumes documents from the pipeline.
|
||||
///
|
||||
fn add_indexing_worker(&mut self) -> Result<()> {
|
||||
let schema = self.index.schema();
|
||||
let document_receiver_clone = self.document_receiver.clone();
|
||||
let mut segment_updater = self.segment_updater.clone();
|
||||
let (heap_size, table_size) = split_memory(self.heap_size_in_bytes_per_thread);
|
||||
@@ -409,7 +408,6 @@ impl IndexWriter {
|
||||
&mut heap,
|
||||
table_size,
|
||||
&segment,
|
||||
&schema,
|
||||
generation,
|
||||
&mut document_iterator,
|
||||
&mut segment_updater,
|
||||
|
||||
@@ -69,7 +69,7 @@ pub fn save_metas(
|
||||
segments: segment_metas,
|
||||
schema,
|
||||
opstamp,
|
||||
payload: payload.clone(),
|
||||
payload,
|
||||
};
|
||||
let mut buffer = serde_json::to_vec_pretty(&metas)?;
|
||||
write!(&mut buffer, "\n")?;
|
||||
|
||||
@@ -17,7 +17,7 @@ use super::operation::AddOperation;
|
||||
use postings::MultiFieldPostingsWriter;
|
||||
use tokenizer::BoxedTokenizer;
|
||||
use tokenizer::FacetTokenizer;
|
||||
use tokenizer::{Tokenizer, TokenStream};
|
||||
use tokenizer::{TokenStream, Tokenizer};
|
||||
use schema::Value;
|
||||
|
||||
/// A `SegmentWriter` is in charge of creating segment index from a
|
||||
@@ -126,11 +126,7 @@ impl<'a> SegmentWriter<'a> {
|
||||
/// Indexes a new document
|
||||
///
|
||||
/// As a user, you should rather use `IndexWriter`'s add_document.
|
||||
pub fn add_document(
|
||||
&mut self,
|
||||
add_operation: AddOperation,
|
||||
schema: &Schema,
|
||||
) -> io::Result<()> {
|
||||
pub fn add_document(&mut self, add_operation: AddOperation, schema: &Schema) -> io::Result<()> {
|
||||
let doc_id = self.max_doc;
|
||||
let mut doc = add_operation.document;
|
||||
self.doc_opstamps.push(add_operation.opstamp);
|
||||
@@ -144,28 +140,26 @@ impl<'a> SegmentWriter<'a> {
|
||||
}
|
||||
match *field_options.field_type() {
|
||||
FieldType::HierarchicalFacet => {
|
||||
let facets: Vec<&[u8]> = field_values.iter()
|
||||
.flat_map(|field_value| {
|
||||
match field_value.value() {
|
||||
&Value::Facet(ref facet) => Some(facet.encoded_bytes()),
|
||||
_ => {
|
||||
panic!("Expected hierarchical facet");
|
||||
}
|
||||
let facets: Vec<&[u8]> = field_values
|
||||
.iter()
|
||||
.flat_map(|field_value| match *field_value.value() {
|
||||
Value::Facet(ref facet) => Some(facet.encoded_bytes()),
|
||||
_ => {
|
||||
panic!("Expected hierarchical facet");
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
let mut term = unsafe {Term::with_capacity(100)};
|
||||
let mut term = unsafe { Term::with_capacity(100) };
|
||||
term.set_field(field);
|
||||
for facet_bytes in facets {
|
||||
let mut unordered_term_id_opt = None;
|
||||
let fake_str = unsafe { str::from_utf8_unchecked(facet_bytes) };
|
||||
FacetTokenizer
|
||||
.token_stream(&fake_str)
|
||||
.process(&mut |ref token| {
|
||||
term.set_text(&token.text);
|
||||
let unordered_term_id = self.multifield_postings.subscribe(doc_id, &term);
|
||||
unordered_term_id_opt = Some(unordered_term_id);
|
||||
});
|
||||
FacetTokenizer.token_stream(fake_str).process(&mut |token| {
|
||||
term.set_text(&token.text);
|
||||
let unordered_term_id =
|
||||
self.multifield_postings.subscribe(doc_id, &term);
|
||||
unordered_term_id_opt = Some(unordered_term_id);
|
||||
});
|
||||
|
||||
if let Some(unordered_term_id) = unordered_term_id_opt {
|
||||
self.fast_field_writers
|
||||
@@ -176,25 +170,26 @@ impl<'a> SegmentWriter<'a> {
|
||||
}
|
||||
}
|
||||
FieldType::Str(_) => {
|
||||
let num_tokens =
|
||||
if let Some(ref mut tokenizer) = self.tokenizers[field.0 as usize] {
|
||||
let texts: Vec<&str> = field_values
|
||||
.iter()
|
||||
.flat_map(|field_value| match *field_value.value() {
|
||||
Value::Str(ref text) => Some(text.as_str()),
|
||||
_ => None,
|
||||
})
|
||||
.collect();
|
||||
if texts.is_empty() {
|
||||
0
|
||||
} else {
|
||||
let mut token_stream = tokenizer.token_stream_texts(&texts[..]);
|
||||
self.multifield_postings
|
||||
.index_text(doc_id, field, &mut token_stream)
|
||||
}
|
||||
} else {
|
||||
let num_tokens = if let Some(ref mut tokenizer) =
|
||||
self.tokenizers[field.0 as usize]
|
||||
{
|
||||
let texts: Vec<&str> = field_values
|
||||
.iter()
|
||||
.flat_map(|field_value| match *field_value.value() {
|
||||
Value::Str(ref text) => Some(text.as_str()),
|
||||
_ => None,
|
||||
})
|
||||
.collect();
|
||||
if texts.is_empty() {
|
||||
0
|
||||
};
|
||||
} else {
|
||||
let mut token_stream = tokenizer.token_stream_texts(&texts[..]);
|
||||
self.multifield_postings
|
||||
.index_text(doc_id, field, &mut token_stream)
|
||||
}
|
||||
} else {
|
||||
0
|
||||
};
|
||||
self.fieldnorms_writer
|
||||
.get_field_writer(field)
|
||||
.map(|field_norms_writer| {
|
||||
@@ -226,9 +221,7 @@ impl<'a> SegmentWriter<'a> {
|
||||
}
|
||||
}
|
||||
self.fieldnorms_writer.fill_val_up_to(doc_id);
|
||||
doc.filter_fields(|field| {
|
||||
schema.get_field_entry(field).is_stored()
|
||||
});
|
||||
doc.filter_fields(|field| schema.get_field_entry(field).is_stored());
|
||||
let doc_writer = self.segment_serializer.get_store_writer();
|
||||
doc_writer.store(&doc)?;
|
||||
self.max_doc += 1;
|
||||
@@ -264,8 +257,8 @@ fn write(
|
||||
mut serializer: SegmentSerializer,
|
||||
) -> Result<()> {
|
||||
let term_ord_map = multifield_postings.serialize(serializer.get_postings_serializer())?;
|
||||
fast_field_writers.serialize(serializer.get_fast_field_serializer(), term_ord_map)?;
|
||||
fieldnorms_writer.serialize(serializer.get_fieldnorms_serializer(), HashMap::new())?;
|
||||
fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;
|
||||
fieldnorms_writer.serialize(serializer.get_fieldnorms_serializer(), &HashMap::new())?;
|
||||
serializer.close()?;
|
||||
|
||||
Ok(())
|
||||
|
||||
107
src/lib.rs
@@ -4,6 +4,7 @@
|
||||
#![feature(box_syntax)]
|
||||
#![feature(optin_builtin_traits)]
|
||||
#![feature(conservative_impl_trait)]
|
||||
#![feature(collections_range)]
|
||||
#![feature(integer_atomics)]
|
||||
#![feature(drain_filter)]
|
||||
#![cfg_attr(test, feature(test))]
|
||||
@@ -19,6 +20,98 @@
|
||||
//! Tantivy is a search engine library.
|
||||
//! Think `Lucene`, but in Rust.
|
||||
//!
|
||||
//! ```rust
|
||||
|
||||
//! # extern crate tempdir;
|
||||
//! #
|
||||
//! #[macro_use]
|
||||
//! extern crate tantivy;
|
||||
//!
|
||||
//! // ...
|
||||
//!
|
||||
//! # use std::path::Path;
|
||||
//! # use tempdir::TempDir;
|
||||
//! # use tantivy::Index;
|
||||
//! # use tantivy::schema::*;
|
||||
//! # use tantivy::collector::TopCollector;
|
||||
//! # use tantivy::query::QueryParser;
|
||||
//! #
|
||||
//! # fn main() {
|
||||
//! # // Let's create a temporary directory for the
|
||||
//! # // sake of this example
|
||||
//! # if let Ok(dir) = TempDir::new("tantivy_example_dir") {
|
||||
//! # run_example(dir.path()).unwrap();
|
||||
//! # dir.close().unwrap();
|
||||
//! # }
|
||||
//! # }
|
||||
//! #
|
||||
//! # fn run_example(index_path: &Path) -> tantivy::Result<()> {
|
||||
//! // First we need to define a schema ...
|
||||
//!
|
||||
//! // `TEXT` means the field should be tokenized and indexed,
|
||||
//! // along with its term frequency and term positions.
|
||||
//! //
|
||||
//! // `STORED` means that the field will also be saved
|
||||
//! // in a compressed, row-oriented key-value store.
|
||||
//! // This store is useful to reconstruct the
|
||||
//! // documents that were selected during the search phase.
|
||||
//! let mut schema_builder = SchemaBuilder::default();
|
||||
//! let title = schema_builder.add_text_field("title", TEXT | STORED);
|
||||
//! let body = schema_builder.add_text_field("body", TEXT);
|
||||
//! let schema = schema_builder.build();
|
||||
//!
|
||||
//! // Indexing documents
|
||||
//!
|
||||
//! let index = Index::create(index_path, schema.clone())?;
|
||||
//!
|
||||
//! // Here we use a buffer of 100MB that will be split
|
||||
//! // between indexing threads.
|
||||
//! let mut index_writer = index.writer(100_000_000)?;
|
||||
//!
|
||||
//! // Let's index one documents!
|
||||
//! index_writer.add_document(doc!(
|
||||
//! title => "The Old Man and the Sea",
|
||||
//! body => "He was an old man who fished alone in a skiff in \
|
||||
//! the Gulf Stream and he had gone eighty-four days \
|
||||
//! now without taking a fish."
|
||||
//! ));
|
||||
//!
|
||||
//! // We need to call .commit() explicitly to force the
|
||||
//! // index_writer to finish processing the documents in the queue,
|
||||
//! // flush the current index to the disk, and advertise
|
||||
//! // the existence of new documents.
|
||||
//! index_writer.commit()?;
|
||||
//!
|
||||
//! // # Searching
|
||||
//!
|
||||
//! index.load_searchers()?;
|
||||
//!
|
||||
//! let searcher = index.searcher();
|
||||
//!
|
||||
//! let query_parser = QueryParser::for_index(&index, vec![title, body]);
|
||||
//!
|
||||
//! // QueryParser may fail if the query is not in the right
|
||||
//! // format. For user facing applications, this can be a problem.
|
||||
//! // A ticket has been opened regarding this problem.
|
||||
//! let query = query_parser.parse_query("sea whale")?;
|
||||
//!
|
||||
//! let mut top_collector = TopCollector::with_limit(10);
|
||||
//! searcher.search(&*query, &mut top_collector)?;
|
||||
//!
|
||||
//! // Our top collector now contains the 10
|
||||
//! // most relevant doc ids...
|
||||
//! let doc_addresses = top_collector.docs();
|
||||
//! for doc_address in doc_addresses {
|
||||
//! let retrieved_doc = searcher.doc(&doc_address)?;
|
||||
//! println!("{}", schema.to_json(&retrieved_doc));
|
||||
//! }
|
||||
//!
|
||||
//! # Ok(())
|
||||
//! # }
|
||||
//! ```
|
||||
//!
|
||||
//!
|
||||
//!
|
||||
//! A good place for you to get started is to check out
|
||||
//! the example code (
|
||||
//! [literate programming](http://fulmicoton.com/tantivy-examples/simple_search.html) /
|
||||
@@ -36,8 +129,6 @@ extern crate log;
|
||||
#[macro_use]
|
||||
extern crate error_chain;
|
||||
|
||||
extern crate regex;
|
||||
extern crate tempfile;
|
||||
extern crate atomicwrites;
|
||||
extern crate bit_set;
|
||||
extern crate byteorder;
|
||||
@@ -51,11 +142,13 @@ extern crate itertools;
|
||||
extern crate lz4;
|
||||
extern crate num_cpus;
|
||||
extern crate owning_ref;
|
||||
extern crate regex;
|
||||
extern crate rust_stemmers;
|
||||
extern crate serde;
|
||||
extern crate serde_json;
|
||||
extern crate stable_deref_trait;
|
||||
extern crate tempdir;
|
||||
extern crate tempfile;
|
||||
extern crate time;
|
||||
extern crate uuid;
|
||||
|
||||
@@ -192,6 +285,7 @@ mod tests {
|
||||
use fastfield::{FastFieldReader, I64FastFieldReader, U64FastFieldReader};
|
||||
use Postings;
|
||||
use rand::{Rng, SeedableRng, XorShiftRng};
|
||||
use rand::distributions::{IndependentSample, Range};
|
||||
|
||||
fn generate_array_with_seed(n: usize, ratio: f32, seed_val: u32) -> Vec<u32> {
|
||||
let seed: &[u32; 4] = &[1, 2, 3, seed_val];
|
||||
@@ -202,6 +296,15 @@ mod tests {
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
|
||||
let seed: &[u32; 4] = &[1, 2, 3, 4];
|
||||
let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed);
|
||||
let between = Range::new(0u32, max_value);
|
||||
(0..n_elems)
|
||||
.map(|_| between.ind_sample(&mut rng))
|
||||
.collect::<Vec<u32>>()
|
||||
}
|
||||
|
||||
pub fn generate_array(n: usize, ratio: f32) -> Vec<u32> {
|
||||
generate_array_with_seed(n, ratio, 4)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@ use DocId;
use std::borrow::Borrow;
use std::borrow::BorrowMut;
use std::cmp::Ordering;
use common::DocBitSet;
use common::BitSet;

/// Expresses the outcome of a call to `DocSet`'s `.skip_next(...)`.
#[derive(PartialEq, Eq, Debug)]
@@ -95,13 +95,11 @@ pub trait DocSet {
/// length of the docset.
fn size_hint(&self) -> u32;

fn to_doc_bitset(&mut self, max_doc: DocId) -> DocBitSet {
let mut docs = DocBitSet::with_maxdoc(max_doc);
/// Appends all docs to a `bitset`.
fn append_to_bitset(&mut self, bitset: &mut BitSet) {
while self.advance() {
let doc = self.doc();
docs.insert(doc);
bitset.insert(self.doc());
}
docs
}
}

@@ -125,26 +123,9 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
let unboxed: &TDocSet = self.borrow();
unboxed.size_hint()
}
}

impl<'a, TDocSet: DocSet> DocSet for &'a mut TDocSet {
fn advance(&mut self) -> bool {
let unref: &mut TDocSet = *self;
unref.advance()
}

fn skip_next(&mut self, target: DocId) -> SkipResult {
let unref: &mut TDocSet = *self;
unref.skip_next(target)
}

fn doc(&self) -> DocId {
let unref: &TDocSet = *self;
unref.doc()
}

fn size_hint(&self) -> u32 {
let unref: &TDocSet = *self;
unref.size_hint()
fn append_to_bitset(&mut self, bitset: &mut BitSet) {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.append_to_bitset(bitset);
}
}
@@ -97,39 +97,52 @@ mod tests {
|
||||
index_writer.commit().unwrap();
|
||||
index.load_searchers().unwrap();
|
||||
let searcher = index.searcher();
|
||||
let query = TermQuery::new(Term::from_field_text(title, "abc"), IndexRecordOption::WithFreqsAndPositions);
|
||||
let query = TermQuery::new(
|
||||
Term::from_field_text(title, "abc"),
|
||||
IndexRecordOption::WithFreqsAndPositions,
|
||||
);
|
||||
let weight = query.specialized_weight(&*searcher);
|
||||
{
|
||||
let mut scorer = weight.specialized_scorer(searcher.segment_reader(0u32)).unwrap();
|
||||
let mut scorer = weight
|
||||
.specialized_scorer(searcher.segment_reader(0u32))
|
||||
.unwrap();
|
||||
scorer.advance();
|
||||
assert_eq!(&[0,1,2], scorer.postings().positions());
|
||||
assert_eq!(&[0, 1, 2], scorer.postings().positions());
|
||||
scorer.advance();
|
||||
assert_eq!(&[0,5], scorer.postings().positions());
|
||||
assert_eq!(&[0, 5], scorer.postings().positions());
|
||||
}
|
||||
{
|
||||
let mut scorer = weight.specialized_scorer(searcher.segment_reader(0u32)).unwrap();
|
||||
let mut scorer = weight
|
||||
.specialized_scorer(searcher.segment_reader(0u32))
|
||||
.unwrap();
|
||||
scorer.advance();
|
||||
scorer.advance();
|
||||
assert_eq!(&[0,5], scorer.postings().positions());
|
||||
assert_eq!(&[0, 5], scorer.postings().positions());
|
||||
}
|
||||
{
|
||||
let mut scorer = weight.specialized_scorer(searcher.segment_reader(0u32)).unwrap();
|
||||
let mut scorer = weight
|
||||
.specialized_scorer(searcher.segment_reader(0u32))
|
||||
.unwrap();
|
||||
assert_eq!(scorer.skip_next(1), SkipResult::Reached);
|
||||
assert_eq!(scorer.doc(), 1);
|
||||
assert_eq!(&[0,5], scorer.postings().positions());
|
||||
assert_eq!(&[0, 5], scorer.postings().positions());
|
||||
}
|
||||
{
|
||||
let mut scorer = weight.specialized_scorer(searcher.segment_reader(0u32)).unwrap();
|
||||
let mut scorer = weight
|
||||
.specialized_scorer(searcher.segment_reader(0u32))
|
||||
.unwrap();
|
||||
assert_eq!(scorer.skip_next(1002), SkipResult::Reached);
|
||||
assert_eq!(scorer.doc(), 1002);
|
||||
assert_eq!(&[0,5], scorer.postings().positions());
|
||||
assert_eq!(&[0, 5], scorer.postings().positions());
|
||||
}
|
||||
{
|
||||
let mut scorer = weight.specialized_scorer(searcher.segment_reader(0u32)).unwrap();
|
||||
let mut scorer = weight
|
||||
.specialized_scorer(searcher.segment_reader(0u32))
|
||||
.unwrap();
|
||||
assert_eq!(scorer.skip_next(100), SkipResult::Reached);
|
||||
assert_eq!(scorer.skip_next(1002), SkipResult::Reached);
|
||||
assert_eq!(scorer.doc(), 1002);
|
||||
assert_eq!(&[0,5], scorer.postings().positions());
|
||||
assert_eq!(&[0, 5], scorer.postings().positions());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -30,15 +30,15 @@ impl<TPostings: Postings> Postings for Box<TPostings> {
|
||||
unboxed.positions()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, TPostings: Postings> Postings for &'a mut TPostings {
|
||||
fn term_freq(&self) -> u32 {
|
||||
let unref: &TPostings = *self;
|
||||
unref.term_freq()
|
||||
}
|
||||
|
||||
fn positions(&self) -> &[u32] {
|
||||
let unref: &TPostings = *self;
|
||||
unref.positions()
|
||||
}
|
||||
}
|
||||
//
|
||||
//impl<'a, TPostings: Postings> Postings for &'a mut TPostings {
|
||||
// fn term_freq(&self) -> u32 {
|
||||
// let unref: &TPostings = *self;
|
||||
// unref.term_freq()
|
||||
// }
|
||||
//
|
||||
// fn positions(&self) -> &[u32] {
|
||||
// let unref: &TPostings = *self;
|
||||
// unref.positions()
|
||||
// }
|
||||
//}
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
use DocId;
|
||||
use schema::Term;
|
||||
use postings::{InvertedIndexSerializer, FieldSerializer};
|
||||
use postings::{FieldSerializer, InvertedIndexSerializer};
|
||||
use std::io;
|
||||
use std::collections::HashMap;
|
||||
use postings::Recorder;
|
||||
use Result;
|
||||
use schema::{Schema, Field};
|
||||
use schema::{Field, Schema};
|
||||
use std::marker::PhantomData;
|
||||
use std::ops::DerefMut;
|
||||
use datastruct::stacker::{TermHashMap, Heap};
|
||||
use postings::{NothingRecorder, TermFrequencyRecorder, TFAndPositionRecorder};
|
||||
use datastruct::stacker::{Heap, TermHashMap};
|
||||
use postings::{NothingRecorder, TFAndPositionRecorder, TermFrequencyRecorder};
|
||||
use schema::FieldEntry;
|
||||
use schema::FieldType;
|
||||
use tokenizer::Token;
|
||||
@@ -17,39 +17,31 @@ use tokenizer::TokenStream;
|
||||
use schema::IndexRecordOption;
|
||||
use postings::UnorderedTermId;
|
||||
|
||||
|
||||
fn posting_from_field_entry<'a>(
|
||||
field_entry: &FieldEntry,
|
||||
heap: &'a Heap,
|
||||
) -> Box<PostingsWriter + 'a> {
|
||||
match *field_entry.field_type() {
|
||||
FieldType::Str(ref text_options) => {
|
||||
text_options
|
||||
FieldType::Str(ref text_options) => text_options
|
||||
.get_indexing_options()
|
||||
.map(|indexing_options| {
|
||||
match indexing_options.index_option() {
|
||||
IndexRecordOption::Basic => {
|
||||
SpecializedPostingsWriter::<NothingRecorder>::new_boxed(heap)
|
||||
}
|
||||
IndexRecordOption::WithFreqs => {
|
||||
SpecializedPostingsWriter::<TermFrequencyRecorder>::new_boxed(heap)
|
||||
}
|
||||
IndexRecordOption::WithFreqsAndPositions => {
|
||||
SpecializedPostingsWriter::<TFAndPositionRecorder>::new_boxed(heap)
|
||||
}
|
||||
.map(|indexing_options| match indexing_options.index_option() {
|
||||
IndexRecordOption::Basic => {
|
||||
SpecializedPostingsWriter::<NothingRecorder>::new_boxed(heap)
|
||||
}
|
||||
IndexRecordOption::WithFreqs => {
|
||||
SpecializedPostingsWriter::<TermFrequencyRecorder>::new_boxed(heap)
|
||||
}
|
||||
IndexRecordOption::WithFreqsAndPositions => {
|
||||
SpecializedPostingsWriter::<TFAndPositionRecorder>::new_boxed(heap)
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| {
|
||||
SpecializedPostingsWriter::<NothingRecorder>::new_boxed(heap)
|
||||
})
|
||||
.unwrap_or_else(|| SpecializedPostingsWriter::<NothingRecorder>::new_boxed(heap)),
|
||||
FieldType::U64(_) | FieldType::I64(_) | FieldType::HierarchicalFacet => {
|
||||
SpecializedPostingsWriter::<NothingRecorder>::new_boxed(heap)
|
||||
}
|
||||
FieldType::U64(_) |
|
||||
FieldType::I64(_) |
|
||||
FieldType::HierarchicalFacet => SpecializedPostingsWriter::<NothingRecorder>::new_boxed(heap),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub struct MultiFieldPostingsWriter<'a> {
|
||||
heap: &'a Heap,
|
||||
term_index: TermHashMap<'a>,
|
||||
@@ -88,7 +80,10 @@ impl<'a> MultiFieldPostingsWriter<'a> {
|
||||
/// It pushes all terms, one field at a time, towards the
|
||||
/// postings serializer.
|
||||
#[allow(needless_range_loop)]
|
||||
pub fn serialize(&self, serializer: &mut InvertedIndexSerializer) -> Result<HashMap<Field, HashMap<UnorderedTermId, usize>>> {
|
||||
pub fn serialize(
|
||||
&self,
|
||||
serializer: &mut InvertedIndexSerializer,
|
||||
) -> Result<HashMap<Field, HashMap<UnorderedTermId, usize>>> {
|
||||
let mut term_offsets: Vec<(&[u8], u32, UnorderedTermId)> = self.term_index.iter().collect();
|
||||
term_offsets.sort_by_key(|&(k, _, _)| k);
|
||||
|
||||
@@ -99,8 +94,8 @@ impl<'a> MultiFieldPostingsWriter<'a> {
|
||||
.map(|(key, _, _)| Term::wrap(key).field())
|
||||
.enumerate();
|
||||
|
||||
|
||||
let mut unordered_term_mappings: HashMap<Field, HashMap<UnorderedTermId, usize>> = HashMap::new();
|
||||
let mut unordered_term_mappings: HashMap<Field, HashMap<UnorderedTermId, usize>> =
|
||||
HashMap::new();
|
||||
|
||||
let mut prev_field = Field(u32::max_value());
|
||||
for (offset, field) in term_offsets_it {
|
||||
@@ -120,8 +115,9 @@ impl<'a> MultiFieldPostingsWriter<'a> {
|
||||
let mut mapping = HashMap::new();
|
||||
for (term_ord, term_unord_id) in term_offsets[start..stop]
|
||||
.iter()
|
||||
.map(|&(_,_,bucket)| bucket)
|
||||
.enumerate() {
|
||||
.map(|&(_, _, bucket)| bucket)
|
||||
.enumerate()
|
||||
{
|
||||
mapping.insert(term_unord_id, term_ord);
|
||||
}
|
||||
unordered_term_mappings.insert(field, mapping);
|
||||
@@ -144,7 +140,6 @@ impl<'a> MultiFieldPostingsWriter<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// The `PostingsWriter` is in charge of receiving documents
|
||||
/// and building a `Segment` in anonymous memory.
|
||||
///
|
||||
@@ -168,20 +163,22 @@ pub trait PostingsWriter {
|
||||
|
||||
/// Serializes the postings on disk.
|
||||
/// The actual serialization format is handled by the `PostingsSerializer`.
|
||||
fn serialize(&self,
|
||||
term_addrs: &[(&[u8], u32, UnorderedTermId)],
|
||||
serializer: &mut FieldSerializer,
|
||||
heap: &Heap)
|
||||
-> io::Result<()>;
|
||||
fn serialize(
|
||||
&self,
|
||||
term_addrs: &[(&[u8], u32, UnorderedTermId)],
|
||||
serializer: &mut FieldSerializer,
|
||||
heap: &Heap,
|
||||
) -> io::Result<()>;
|
||||
|
||||
/// Tokenizes a text and subscribes all of its tokens.
|
||||
fn index_text<'a>(&mut self,
|
||||
term_index: &mut TermHashMap,
|
||||
doc_id: DocId,
|
||||
field: Field,
|
||||
token_stream: &mut TokenStream,
|
||||
heap: &Heap)
|
||||
-> u32 {
|
||||
fn index_text(
|
||||
&mut self,
|
||||
term_index: &mut TermHashMap,
|
||||
doc_id: DocId,
|
||||
field: Field,
|
||||
token_stream: &mut TokenStream,
|
||||
heap: &Heap,
|
||||
) -> u32 {
|
||||
let mut term = unsafe { Term::with_capacity(100) };
|
||||
term.set_field(field);
|
||||
let mut sink = |token: &Token| {
|
||||
@@ -215,7 +212,6 @@ impl<'a, Rec: Recorder + 'static> SpecializedPostingsWriter<'a, Rec> {
|
||||
}
|
||||
|
||||
impl<'a, Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<'a, Rec> {
|
||||
|
||||
fn subscribe(
|
||||
&mut self,
|
||||
term_index: &mut TermHashMap,
|
||||
@@ -237,8 +233,6 @@ impl<'a, Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<'
|
||||
term_ord
|
||||
}
|
||||
|
||||
|
||||
|
||||
fn serialize(
|
||||
&self,
|
||||
term_addrs: &[(&[u8], u32, UnorderedTermId)],
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use compression::{BlockDecoder, CompressedIntStream, VIntDecoder, COMPRESSION_BLOCK_SIZE};
|
||||
use DocId;
|
||||
use common::DocBitSet;
|
||||
use common::BitSet;
|
||||
use postings::{DocSet, HasLen, Postings, SkipResult};
|
||||
use std::cmp;
|
||||
use fst::Streamer;
|
||||
@@ -250,22 +250,20 @@ impl DocSet for SegmentPostings {
|
||||
);
|
||||
docs[self.cur]
|
||||
}
|
||||
|
||||
fn to_doc_bitset(&mut self, max_doc: DocId) -> DocBitSet {
|
||||
|
||||
fn append_to_bitset(&mut self, bitset: &mut BitSet) {
|
||||
// finish the current block
|
||||
let mut docs = DocBitSet::with_maxdoc(max_doc);
|
||||
if self.advance() {
|
||||
for &doc in &self.block_cursor.docs()[self.cur..] {
|
||||
docs.insert(doc);
|
||||
bitset.insert(doc);
|
||||
}
|
||||
// ... iterate through the remaining blocks.
|
||||
while self.block_cursor.advance() {
|
||||
for &doc in self.block_cursor.docs() {
|
||||
docs.insert(doc);
|
||||
bitset.insert(doc);
|
||||
}
|
||||
}
|
||||
}
|
||||
return docs;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use common::BinarySerializable;
|
||||
use common::{BinarySerializable, FixedSize};
|
||||
use std::io;
|
||||
|
||||
/// `TermInfo` contains all of the information
|
||||
@@ -23,10 +23,13 @@ pub struct TermInfo {
|
||||
pub positions_inner_offset: u8,
|
||||
}
|
||||
|
||||
impl TermInfo {
|
||||
/// Size required to encode the `TermInfo`.
// TODO make this smaller when positions are unused for instance.
pub(crate) const SIZE_IN_BYTES: usize = 4 + 8 + 8 + 1;
impl FixedSize for TermInfo {
/// Size required for the binary serialization of `TermInfo`.
/// This is large, but in practice, all `TermInfo` but the first one
/// of the block are bitpacked.
///
/// See `TermInfoStore`.
const SIZE_IN_BYTES: usize = u32::SIZE_IN_BYTES + 2 * u64::SIZE_IN_BYTES + u8::SIZE_IN_BYTES;
}
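The new constant spells out the same 21 bytes as the old literal `4 + 8 + 8 + 1`, just in terms of the primitive sizes. A quick sanity check of that arithmetic (nothing beyond the old literal is assumed here):

```rust
#[test]
fn term_info_is_21_bytes_unpacked() {
    // one u32 (4) + two u64 offsets (16) + one u8 (1), matching the old literal 4 + 8 + 8 + 1;
    // only the first TermInfo of a block is stored in this unpacked form.
    assert_eq!(4 + 2 * 8 + 1, 21);
}
```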
|
||||
|
||||
impl BinarySerializable for TermInfo {
|
||||
@@ -50,3 +53,15 @@ impl BinarySerializable for TermInfo {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::TermInfo;
|
||||
use common::test::fixed_size_test;
|
||||
|
||||
#[test]
|
||||
fn test_fixed_size() {
|
||||
fixed_size_test::<TermInfo>();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ const HORIZON: usize = 64 * HORIZON_NUM_TINYBITSETS;
|
||||
/// Creates a `DocSet` that iterates through the union of its underlying `DocSet`s.
|
||||
pub struct UnionDocSet<TDocSet: DocSet> {
|
||||
docsets: Vec<TDocSet>,
|
||||
bitsets: Box<[u64; HORIZON_NUM_TINYBITSETS]>,
|
||||
bitsets: Box<[TinySet; HORIZON_NUM_TINYBITSETS]>,
|
||||
cursor: usize,
|
||||
offset: DocId,
|
||||
doc: DocId,
|
||||
@@ -31,7 +31,7 @@ impl<TDocSet: DocSet> From<Vec<TDocSet>> for UnionDocSet<TDocSet> {
|
||||
.collect();
|
||||
UnionDocSet {
|
||||
docsets: non_empty_docsets,
|
||||
bitsets: Box::new([0u64; HORIZON_NUM_TINYBITSETS]),
|
||||
bitsets: Box::new([TinySet::empty(); HORIZON_NUM_TINYBITSETS]),
|
||||
cursor: HORIZON_NUM_TINYBITSETS,
|
||||
offset: 0,
|
||||
doc: 0
|
||||
@@ -40,10 +40,10 @@ impl<TDocSet: DocSet> From<Vec<TDocSet>> for UnionDocSet<TDocSet> {
|
||||
}
|
||||
|
||||
|
||||
fn refill<TDocSet: DocSet>(docsets: &mut Vec<TDocSet>, bitsets: &mut [u64; HORIZON_NUM_TINYBITSETS], min_doc: DocId) {
|
||||
fn refill<TDocSet: DocSet>(docsets: &mut Vec<TDocSet>, bitsets: &mut [TinySet; HORIZON_NUM_TINYBITSETS], min_doc: DocId) {
|
||||
docsets
|
||||
.drain_filter(|docset| {
|
||||
let horizon = min_doc + HORIZON_NUM_TINYBITSETS as u32;
|
||||
let horizon = min_doc + HORIZON as u32;
|
||||
loop {
|
||||
let doc = docset.doc();
|
||||
if doc >= horizon {
|
||||
@@ -51,7 +51,7 @@ fn refill<TDocSet: DocSet>(docsets: &mut Vec<TDocSet>, bitsets: &mut [u64; HORIZ
|
||||
}
|
||||
// add this document
|
||||
let delta = doc - min_doc;
|
||||
bitsets[(delta / 64) as usize] |= 1 << (delta % 64);
|
||||
bitsets[(delta / 64) as usize].insert_mut(delta % 64u32);
|
||||
if !docset.advance() {
|
||||
// remove the docset, it has been entirely consumed.
|
||||
return true;
|
||||
@@ -91,14 +91,6 @@ impl<TDocSet: DocSet> DocSet for UnionDocSet<TDocSet> {
|
||||
self.refill()
|
||||
}
|
||||
|
||||
fn doc(&self) -> DocId {
|
||||
self.doc
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> u32 {
|
||||
0u32
|
||||
}
|
||||
|
||||
fn skip_next(&mut self, target: DocId) -> SkipResult {
|
||||
let mut reached = false;
|
||||
self.docsets
|
||||
@@ -122,6 +114,14 @@ impl<TDocSet: DocSet> DocSet for UnionDocSet<TDocSet> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn doc(&self) -> DocId {
|
||||
self.doc
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> u32 {
|
||||
0u32
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -9,7 +9,6 @@ use DocId;
|
||||
use std::any::Any;
|
||||
use core::Searcher;
|
||||
|
||||
|
||||
/// Query that matches all of the documents.
|
||||
///
|
||||
/// All of the documents get the score 1f32.
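A hedged usage sketch for `AllQuery`; this diff only shows the weight and scorer, so the assumption that `AllQuery` is constructed as a plain unit value is mine:

```rust
// Count every document in the index; each one matches with a score of 1.0.
let mut count_collector = CountCollector::default();
AllQuery.search(&*searcher, &mut count_collector)?; // assumes `AllQuery` is a unit struct
let total_docs = count_collector.count();
```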
|
||||
@@ -34,12 +33,11 @@ impl Weight for AllWeight {
|
||||
Ok(box AllScorer {
|
||||
started: false,
|
||||
doc: 0u32,
|
||||
max_doc: reader.max_doc()
|
||||
max_doc: reader.max_doc(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Scorer associated to the `AllQuery` query.
|
||||
pub struct AllScorer {
|
||||
started: bool,
|
||||
@@ -51,8 +49,7 @@ impl DocSet for AllScorer {
|
||||
fn advance(&mut self) -> bool {
|
||||
if self.started {
|
||||
self.doc += 1u32;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
self.started = true;
|
||||
}
|
||||
self.doc < self.max_doc
|
||||
@@ -71,4 +68,4 @@ impl Scorer for AllScorer {
|
||||
fn score(&self) -> Score {
|
||||
1f32
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use common::{DocBitSet, TinySet};
|
||||
use common::{BitSet, TinySet};
|
||||
use DocId;
|
||||
use postings::DocSet;
|
||||
use postings::SkipResult;
|
||||
@@ -8,107 +8,100 @@ use std::cmp::Ordering;
|
||||
///
|
||||
/// # Implementation detail
|
||||
///
|
||||
/// Skipping is relatively fast here as we can directly point to the
|
||||
/// Skipping is relatively fast here as we can directly point to the
|
||||
/// right tiny bitset bucket.
|
||||
///
|
||||
/// TODO: Consider implementing a `BitTreeSet` in order to advance faster
|
||||
/// TODO: Consider implementing a `BitTreeSet` in order to advance faster
|
||||
/// when the bitset is sparse
|
||||
pub struct BitSetDocSet {
|
||||
docs: DocBitSet,
|
||||
cursor_bucket: usize, //< index associated to the current tiny bitset
|
||||
cursor_tinybitset: u64,
|
||||
doc: u32
|
||||
docs: BitSet,
|
||||
cursor_bucket: u32, //< index associated to the current tiny bitset
|
||||
cursor_tinybitset: TinySet,
|
||||
doc: u32,
|
||||
}
|
||||
|
||||
impl From<DocBitSet> for BitSetDocSet {
|
||||
fn from(docs: DocBitSet) -> BitSetDocSet {
|
||||
let first_tiny_bitset =
|
||||
if docs.num_tiny_bitsets() == 0 {
|
||||
0u64
|
||||
} else {
|
||||
docs.tiny_bitset(0) as u64
|
||||
};
|
||||
impl BitSetDocSet {
|
||||
fn go_to_bucket(&mut self, bucket_addr: u32) {
|
||||
self.cursor_bucket = bucket_addr;
|
||||
self.cursor_tinybitset = self.docs.tinyset(bucket_addr);
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BitSet> for BitSetDocSet {
|
||||
fn from(docs: BitSet) -> BitSetDocSet {
|
||||
let first_tiny_bitset = if docs.max_value() == 0 {
|
||||
TinySet::empty()
|
||||
} else {
|
||||
docs.tinyset(0)
|
||||
};
|
||||
BitSetDocSet {
|
||||
docs,
|
||||
cursor_bucket: 0,
|
||||
cursor_tinybitset: first_tiny_bitset,
|
||||
doc: 0u32
|
||||
doc: 0u32,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DocSet for BitSetDocSet {
|
||||
fn advance(&mut self) -> bool {
|
||||
loop {
|
||||
if let Some(lower) = self.cursor_tinybitset.pop_lowest() {
|
||||
self.doc = (self.cursor_bucket as u32 * 64u32) | lower;
|
||||
return true;
|
||||
} else {
|
||||
if self.cursor_bucket < self.docs.num_tiny_bitsets() - 1 {
|
||||
self.cursor_bucket += 1;
|
||||
self.cursor_tinybitset = self.docs.tiny_bitset(self.cursor_bucket);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(lower) = self.cursor_tinybitset.pop_lowest() {
|
||||
self.doc = (self.cursor_bucket as u32 * 64u32) | lower;
|
||||
return true;
|
||||
}
|
||||
if let Some(cursor_bucket) = self.docs.first_non_empty_bucket(self.cursor_bucket + 1) {
|
||||
self.go_to_bucket(cursor_bucket);
|
||||
let lower = self.cursor_tinybitset.pop_lowest().unwrap();
|
||||
self.doc = (cursor_bucket * 64u32) | lower;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
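The rewritten `advance` relies on a simple invariant: a document id is a 64-wide bucket index plus a bit offset inside that bucket's `TinySet`. A small sketch of that arithmetic, mirroring the expressions used above (the helper names are illustrative):

```rust
// Split a doc id into (bucket, bit) and rebuild it the way `advance` does.
fn split(doc: DocId) -> (u32, u32) {
    (doc / 64u32, doc % 64u32)
}

fn join(bucket: u32, lower: u32) -> DocId {
    (bucket * 64u32) | lower // same as `self.doc = (cursor_bucket * 64u32) | lower`
}

// e.g. doc 130 lives in bucket 2 at bit 2, and join(2, 2) == 130.
```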
|
||||
|
||||
|
||||
fn skip_next(&mut self, target: DocId) -> SkipResult {
|
||||
// skip is required to advance.
|
||||
if !self.advance() {
|
||||
return SkipResult::End;
|
||||
}
|
||||
let target_bucket = (target / 64u32) as usize;
|
||||
|
||||
let target_bucket = target / 64u32;
|
||||
|
||||
// Mask for all of the bits greater or equal
|
||||
// to our target document.
|
||||
match target_bucket.cmp(&self.cursor_bucket) {
|
||||
Ordering::Greater => {
|
||||
self.cursor_bucket = target_bucket;
|
||||
self.cursor_tinybitset = self.docs.tiny_bitset(target_bucket);
|
||||
// let greater: u64 = <u64 as TinySet>::range_greater_or_equal(target % 64);
|
||||
// self.cursor_tinybitset.intersect(greater);
|
||||
loop {
|
||||
if !self.advance() {
|
||||
return SkipResult::End;
|
||||
self.go_to_bucket(target_bucket);
|
||||
let greater_filter: TinySet = TinySet::range_greater_or_equal(target);
|
||||
self.cursor_tinybitset = self.cursor_tinybitset.intersect(greater_filter);
|
||||
if !self.advance() {
|
||||
SkipResult::End
|
||||
} else {
|
||||
if self.doc() == target {
|
||||
SkipResult::Reached
|
||||
} else {
|
||||
if self.doc() == target {
|
||||
return SkipResult::Reached;
|
||||
} else {
|
||||
// assert!(self.doc() > target);
|
||||
if self.doc() > target {
|
||||
return SkipResult::OverStep;
|
||||
}
|
||||
|
||||
}
|
||||
debug_assert!(self.doc() > target);
|
||||
SkipResult::OverStep
|
||||
}
|
||||
}
|
||||
}
|
||||
Ordering::Equal => {
|
||||
loop {
|
||||
match self.doc().cmp(&target) {
|
||||
Ordering::Less => {
|
||||
if !self.advance() {
|
||||
return SkipResult::End;
|
||||
}
|
||||
}
|
||||
Ordering::Equal => {
|
||||
assert!(self.doc() == target);
|
||||
return SkipResult::Reached;
|
||||
}
|
||||
Ordering::Greater => {
|
||||
assert!(self.doc() > target);
|
||||
return SkipResult::OverStep;
|
||||
Ordering::Equal => loop {
|
||||
match self.doc().cmp(&target) {
|
||||
Ordering::Less => {
|
||||
if !self.advance() {
|
||||
return SkipResult::End;
|
||||
}
|
||||
}
|
||||
Ordering::Equal => {
|
||||
return SkipResult::Reached;
|
||||
}
|
||||
Ordering::Greater => {
|
||||
debug_assert!(self.doc() > target);
|
||||
return SkipResult::OverStep;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
},
|
||||
Ordering::Less => {
|
||||
assert!(self.doc() > target);
|
||||
debug_assert!(self.doc() > target);
|
||||
SkipResult::OverStep
|
||||
}
|
||||
}
|
||||
@@ -135,19 +128,20 @@ impl DocSet for BitSetDocSet {
|
||||
/// but we don't have access to any better
|
||||
/// value.
|
||||
fn size_hint(&self) -> u32 {
|
||||
self.docs.size_hint()
|
||||
self.docs.len() as u32
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use DocId;
|
||||
use common::DocBitSet;
|
||||
use postings::{SkipResult, DocSet};
|
||||
use common::BitSet;
|
||||
use postings::{DocSet, SkipResult};
|
||||
use super::BitSetDocSet;
|
||||
extern crate test;
|
||||
|
||||
fn create_docbitset(docs: &[DocId], max_doc: DocId) -> BitSetDocSet {
|
||||
let mut docset = DocBitSet::with_maxdoc(max_doc);
|
||||
let mut docset = BitSet::with_max_value(max_doc);
|
||||
for &doc in docs {
|
||||
docset.insert(doc);
|
||||
}
|
||||
@@ -167,10 +161,10 @@ mod tests {
|
||||
#[test]
|
||||
fn test_docbitset_sequential() {
|
||||
test_go_through_sequential(&[]);
|
||||
test_go_through_sequential(&[1,2,3]);
|
||||
test_go_through_sequential(&[1,2,3,4,5,63,64,65]);
|
||||
test_go_through_sequential(&[63,64,65]);
|
||||
test_go_through_sequential(&[1,2,3,4,95,96,97,98,99]);
|
||||
test_go_through_sequential(&[1, 2, 3]);
|
||||
test_go_through_sequential(&[1, 2, 3, 4, 5, 63, 64, 65]);
|
||||
test_go_through_sequential(&[63, 64, 65]);
|
||||
test_go_through_sequential(&[1, 2, 3, 4, 95, 96, 97, 98, 99]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -206,6 +200,73 @@ mod tests {
|
||||
assert_eq!(docset.doc(), 5112);
|
||||
assert!(!docset.advance());
|
||||
}
|
||||
{
|
||||
let mut docset = create_docbitset(&[1, 5, 6, 7, 5112, 5500, 6666], 10_000);
|
||||
assert_eq!(docset.skip_next(5112), SkipResult::Reached);
|
||||
assert_eq!(docset.doc(), 5112);
|
||||
assert!(docset.advance());
|
||||
assert_eq!(docset.doc(), 5500);
|
||||
assert!(docset.advance());
|
||||
assert_eq!(docset.doc(), 6666);
|
||||
assert!(!docset.advance());
|
||||
}
|
||||
{
|
||||
let mut docset = create_docbitset(&[1, 5, 6, 7, 5112, 5500, 6666], 10_000);
|
||||
assert_eq!(docset.skip_next(5111), SkipResult::OverStep);
|
||||
assert_eq!(docset.doc(), 5112);
|
||||
assert!(docset.advance());
|
||||
assert_eq!(docset.doc(), 5500);
|
||||
assert!(docset.advance());
|
||||
assert_eq!(docset.doc(), 6666);
|
||||
assert!(!docset.advance());
|
||||
}
|
||||
{
|
||||
let mut docset = create_docbitset(&[1, 5, 6, 7, 5112, 5513, 6666], 10_000);
|
||||
assert_eq!(docset.skip_next(5111), SkipResult::OverStep);
|
||||
assert_eq!(docset.doc(), 5112);
|
||||
assert!(docset.advance());
|
||||
assert_eq!(docset.doc(), 5513);
|
||||
assert!(docset.advance());
|
||||
assert_eq!(docset.doc(), 6666);
|
||||
assert!(!docset.advance());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
#[bench]
|
||||
fn bench_bitset_1pct_insert(b: &mut test::Bencher) {
|
||||
use tests;
|
||||
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
|
||||
b.iter(|| {
|
||||
let mut bitset = BitSet::with_max_value(1_000_000);
|
||||
for el in els.iter().cloned() {
|
||||
bitset.insert(el);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_bitset_1pct_clone(b: &mut test::Bencher) {
|
||||
use tests;
|
||||
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
|
||||
let mut bitset = BitSet::with_max_value(1_000_000);
|
||||
for el in els {
|
||||
bitset.insert(el);
|
||||
}
|
||||
b.iter(|| bitset.clone());
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_bitset_1pct_clone_iterate(b: &mut test::Bencher) {
|
||||
use tests;
|
||||
use DocSet;
|
||||
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
|
||||
let mut bitset = BitSet::with_max_value(1_000_000);
|
||||
for el in els {
|
||||
bitset.insert(el);
|
||||
}
|
||||
b.iter(|| {
|
||||
let mut docset = BitSetDocSet::from(bitset.clone());
|
||||
while docset.advance() {}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,6 +39,13 @@ impl Query for BooleanQuery {
|
||||
self
|
||||
}
|
||||
|
||||
fn disable_scoring(&mut self) {
|
||||
self.scoring_disabled = true;
|
||||
for &mut (_, ref mut subquery) in &mut self.subqueries {
|
||||
subquery.disable_scoring();
|
||||
}
|
||||
}
|
||||
|
||||
fn weight(&self, searcher: &Searcher) -> Result<Box<Weight>> {
|
||||
let sub_weights = self.subqueries
|
||||
.iter()
|
||||
@@ -48,13 +55,6 @@ impl Query for BooleanQuery {
|
||||
.collect::<Result<_>>()?;
|
||||
Ok(box BooleanWeight::new(sub_weights, self.scoring_disabled))
|
||||
}
|
||||
|
||||
fn disable_scoring(&mut self) {
|
||||
self.scoring_disabled = true;
|
||||
for &mut (_, ref mut subquery) in &mut self.subqueries {
|
||||
subquery.disable_scoring();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BooleanQuery {
|
||||
|
||||
@@ -24,13 +24,14 @@ impl BooleanWeight {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl Weight for BooleanWeight {
|
||||
fn scorer<'a>(&'a self, reader: &'a SegmentReader) -> Result<Box<Scorer + 'a>> {
|
||||
if self.weights.is_empty() {
|
||||
Ok(box EmptyScorer)
|
||||
} else if self.weights.len() == 1 {
|
||||
let &(occur, ref weight) = &self.weights[0];
|
||||
if occur == Occur::MustNot {
|
||||
let &(occur, ref weight) = &self.weights[0];
|
||||
if occur == Occur::MustNot {
|
||||
Ok(box EmptyScorer)
|
||||
} else {
|
||||
weight.scorer(reader)
|
||||
|
||||
@@ -15,7 +15,6 @@ mod all_query;
|
||||
mod bitset;
|
||||
mod range_query;
|
||||
|
||||
|
||||
pub use self::bitset::BitSetDocSet;
|
||||
pub use self::boolean_query::BooleanQuery;
|
||||
pub use self::occur_filter::OccurFilter;
|
||||
@@ -28,7 +27,7 @@ pub use self::scorer::EmptyScorer;
|
||||
pub use self::scorer::Scorer;
|
||||
pub use self::term_query::TermQuery;
|
||||
pub use self::weight::Weight;
|
||||
pub use self::all_query::{AllQuery, AllWeight, AllScorer};
|
||||
pub use self::range_query::{RangeQuery,RangeDefinition, RangeWeight};
|
||||
pub use self::all_query::{AllQuery, AllScorer, AllWeight};
|
||||
pub use self::range_query::RangeQuery;
|
||||
pub use self::scorer::ConstScorer;
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ pub use self::phrase_query::PhraseQuery;
|
||||
pub use self::phrase_weight::PhraseWeight;
|
||||
pub use self::phrase_scorer::PhraseScorer;
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -75,8 +74,6 @@ mod tests {
|
||||
assert_eq!(test_query(vec!["g", "a"]), empty_vec);
|
||||
}
|
||||
|
||||
|
||||
|
||||
#[test] // motivated by #234
|
||||
pub fn test_phrase_query_docfreq_order() {
|
||||
let mut schema_builder = SchemaBuilder::default();
|
||||
@@ -90,11 +87,13 @@ mod tests {
|
||||
let doc = doc!(text_field=>"b");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
{ // 1
|
||||
{
|
||||
// 1
|
||||
let doc = doc!(text_field=>"a b");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
{ // 2
|
||||
{
|
||||
// 2
|
||||
let doc = doc!(text_field=>"b a");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use query::Scorer;
|
||||
use DocId;
|
||||
use postings::{SkipResult, IntersectionDocSet, DocSet, Postings, SegmentPostings};
|
||||
use postings::{DocSet, IntersectionDocSet, Postings, SegmentPostings, SkipResult};
|
||||
|
||||
struct PostingsWithOffset {
|
||||
offset: u32,
|
||||
@@ -11,7 +11,7 @@ impl PostingsWithOffset {
|
||||
pub fn new(segment_postings: SegmentPostings, offset: u32) -> PostingsWithOffset {
|
||||
PostingsWithOffset {
|
||||
offset,
|
||||
segment_postings
|
||||
segment_postings,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -49,7 +49,6 @@ pub struct PhraseScorer {
|
||||
}
|
||||
|
||||
impl PhraseScorer {
|
||||
|
||||
pub fn new(term_postings: Vec<SegmentPostings>) -> PhraseScorer {
|
||||
let postings_with_offsets: Vec<_> = term_postings
|
||||
.into_iter()
|
||||
@@ -57,12 +56,11 @@ impl PhraseScorer {
|
||||
.map(|(offset, postings)| PostingsWithOffset::new(postings, offset as u32))
|
||||
.collect();
|
||||
PhraseScorer {
|
||||
intersection_docset: IntersectionDocSet::from(postings_with_offsets)
|
||||
intersection_docset: IntersectionDocSet::from(postings_with_offsets),
|
||||
}
|
||||
}
|
||||
|
||||
fn phrase_match(&self) -> bool {
|
||||
|
||||
// TODO maybe we could avoid decoding positions lazily for all terms
|
||||
// when there is > 2 terms.
|
||||
//
|
||||
@@ -74,7 +72,6 @@ impl PhraseScorer {
|
||||
positions_arr[docset.offset as usize] = docset.positions();
|
||||
}
|
||||
|
||||
|
||||
let num_postings = positions_arr.len() as u32;
|
||||
|
||||
let mut ord = 1u32;
|
||||
|
||||
@@ -23,7 +23,8 @@ impl Weight for PhraseWeight {
|
||||
for term in &self.phrase_terms {
|
||||
if let Some(postings) = reader
|
||||
.inverted_index(term.field())
|
||||
.read_postings(term, IndexRecordOption::WithFreqsAndPositions) {
|
||||
.read_postings(term, IndexRecordOption::WithFreqsAndPositions)
|
||||
{
|
||||
term_postings_list.push(postings);
|
||||
} else {
|
||||
return Ok(box EmptyScorer);
|
||||
|
||||
@@ -1,91 +1,129 @@
|
||||
use schema::{Field, Term, IndexRecordOption};
|
||||
use query::{Query, Weight, Scorer};
|
||||
use schema::{Field, IndexRecordOption, Term};
|
||||
use query::{Query, Scorer, Weight};
|
||||
use termdict::{TermDictionary, TermStreamer, TermStreamerBuilder};
|
||||
use core::SegmentReader;
|
||||
use common::DocBitSet;
|
||||
use common::BitSet;
|
||||
use Result;
|
||||
use std::any::Any;
|
||||
use core::Searcher;
|
||||
use query::BitSetDocSet;
|
||||
use query::ConstScorer;
|
||||
use std::collections::Bound;
|
||||
use std::collections::range::RangeArgument;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
enum Boundary {
|
||||
Included(Vec<u8>),
|
||||
Excluded(Vec<u8>),
|
||||
Unbounded,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct RangeDefinition {
|
||||
field: Field,
|
||||
left_bound: Boundary,
|
||||
right_bound: Boundary
|
||||
}
|
||||
|
||||
impl RangeDefinition {
|
||||
pub fn for_field(field: Field) -> RangeDefinition{
|
||||
RangeDefinition {
|
||||
field,
|
||||
left_bound: Boundary::Unbounded,
|
||||
right_bound: Boundary::Unbounded
|
||||
}
|
||||
}
|
||||
|
||||
pub fn left_included(mut self, left: Term) -> RangeDefinition {
|
||||
assert_eq!(left.field(), self.field);
|
||||
self.left_bound = Boundary::Included(left.value_bytes().to_owned());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn left_excluded(mut self, left: Term) -> RangeDefinition {
|
||||
assert_eq!(left.field(), self.field);
|
||||
self.left_bound = Boundary::Excluded(left.value_bytes().to_owned());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn right_included(mut self, right: Term) -> RangeDefinition {
|
||||
assert_eq!(right.field(), self.field);
|
||||
self.right_bound = Boundary::Included(right.value_bytes().to_owned());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn right_excluded(mut self, right: Term) -> RangeDefinition {
|
||||
assert_eq!(right.field(), self.field);
|
||||
self.right_bound = Boundary::Excluded(right.value_bytes().to_owned());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn term_range<'a, T>(&self, term_dict: &'a T) -> T::Streamer
|
||||
where T: TermDictionary<'a> + 'a
|
||||
{
|
||||
use self::Boundary::*;
|
||||
let mut term_stream_builder = term_dict.range();
|
||||
term_stream_builder =
|
||||
match &self.left_bound {
|
||||
&Included(ref term_val) => term_stream_builder.ge(term_val),
|
||||
&Excluded(ref term_val) => term_stream_builder.gt(term_val),
|
||||
&Unbounded => term_stream_builder
|
||||
};
|
||||
term_stream_builder =
|
||||
match &self.right_bound {
|
||||
&Included(ref term_val) => term_stream_builder.le(term_val),
|
||||
&Excluded(ref term_val) => term_stream_builder.lt(term_val),
|
||||
&Unbounded => term_stream_builder
|
||||
};
|
||||
term_stream_builder.into_stream()
|
||||
fn map_bound<TFrom, Transform: Fn(TFrom) -> Vec<u8>>(
bound: Bound<TFrom>,
transform: &Transform,
) -> Bound<Vec<u8>> {
use self::Bound::*;
match bound {
Excluded(from_val) => Excluded(transform(from_val)),
Included(from_val) => Included(transform(from_val)),
Unbounded => Unbounded,
}
}
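A short usage sketch for this helper: the same byte-producing closure is applied to whichever bound variant is present, and `Unbounded` passes through untouched. The `to_string` transform is illustrative only; the constructors below feed it `Term::from_field_*` bytes instead.

```rust
let make_bytes = |val: i64| val.to_string().into_bytes();
let left: Bound<Vec<u8>> = map_bound(Bound::Included(1960i64), &make_bytes);
let right: Bound<Vec<u8>> = map_bound(Bound::Excluded(1970i64), &make_bytes);
// map_bound(Bound::Unbounded, &make_bytes) stays Bound::Unbounded.
```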
|
||||
|
||||
/// `RangeQuery` matches all documents that have at least one term within a defined range.
///
/// Matched documents all get a constant `Score` of one.
///
/// # Implementation
///
/// The current implementation iterates over the terms within the range
/// and appends all of the matching documents to a `BitSet`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
///
|
||||
/// # #[macro_use]
|
||||
/// # extern crate tantivy;
|
||||
/// # use tantivy::Index;
|
||||
/// # use tantivy::schema::{SchemaBuilder, INT_INDEXED};
|
||||
/// # use tantivy::collector::CountCollector;
|
||||
/// # use tantivy::query::Query;
|
||||
/// # use tantivy::Result;
|
||||
/// # use tantivy::query::RangeQuery;
|
||||
/// #
|
||||
/// # fn run() -> Result<()> {
|
||||
/// # let mut schema_builder = SchemaBuilder::new();
|
||||
/// # let year_field = schema_builder.add_u64_field("year", INT_INDEXED);
|
||||
/// # let schema = schema_builder.build();
|
||||
/// #
|
||||
/// # let index = Index::create_in_ram(schema);
|
||||
/// # {
|
||||
/// # let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
|
||||
/// # for year in 1950u64..2017u64 {
|
||||
/// # let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
|
||||
/// # for _ in 0..num_docs_within_year {
|
||||
/// # index_writer.add_document(doc!(year_field => year));
|
||||
/// # }
|
||||
/// # }
|
||||
/// # index_writer.commit().unwrap();
|
||||
/// # }
|
||||
/// # index.load_searchers()?;
|
||||
/// let searcher = index.searcher();
|
||||
///
|
||||
/// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
|
||||
///
|
||||
/// // ... or `1960..=1969` if inclusive range is enabled.
|
||||
/// let mut count_collector = CountCollector::default();
|
||||
/// docs_in_the_sixties.search(&*searcher, &mut count_collector)?;
|
||||
///
|
||||
/// let num_60s_books = count_collector.count();
|
||||
///
|
||||
/// # assert_eq!(num_60s_books, 2285);
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// #
|
||||
/// # fn main() {
|
||||
/// # run().unwrap()
|
||||
/// # }
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct RangeQuery {
|
||||
range_definition: RangeDefinition
|
||||
field: Field,
|
||||
left_bound: Bound<Vec<u8>>,
|
||||
right_bound: Bound<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl RangeQuery {
|
||||
pub fn new(range_definition: RangeDefinition) -> RangeQuery {
|
||||
/// Create a new `RangeQuery` over a `i64` field.
|
||||
pub fn new_i64<TRangeArgument: RangeArgument<i64>>(
|
||||
field: Field,
|
||||
range: TRangeArgument,
|
||||
) -> RangeQuery {
|
||||
let make_term_val = |val: &i64| Term::from_field_i64(field, *val).value_bytes().to_owned();
|
||||
RangeQuery {
|
||||
range_definition
|
||||
field,
|
||||
left_bound: map_bound(range.start(), &make_term_val),
|
||||
right_bound: map_bound(range.end(), &make_term_val),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new `RangeQuery` over a `u64` field.
|
||||
pub fn new_u64<TRangeArgument: RangeArgument<u64>>(
|
||||
field: Field,
|
||||
range: TRangeArgument,
|
||||
) -> RangeQuery {
|
||||
let make_term_val = |val: &u64| Term::from_field_u64(field, *val).value_bytes().to_owned();
|
||||
RangeQuery {
|
||||
field,
|
||||
left_bound: map_bound(range.start(), &make_term_val),
|
||||
right_bound: map_bound(range.end(), &make_term_val),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new `RangeQuery` over a `Str` field.
pub fn new_str<'b, TRangeArgument: RangeArgument<&'b str>>(
field: Field,
range: TRangeArgument,
) -> RangeQuery {
let make_term_val = |val: &&str| val.as_bytes().to_vec();
RangeQuery {
field,
left_bound: map_bound(range.start(), &make_term_val),
right_bound: map_bound(range.end(), &make_term_val),
}
}
}
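A hedged usage sketch for the string constructor above, in the same style as the `new_u64` example earlier in this file. `title_field`, `searcher` and the surrounding setup are hypothetical and not part of this diff.

```rust
// All documents whose title field contains a term in ["a", "f").
let titles_a_to_e = RangeQuery::new_str(title_field, "a".."f");
let mut count_collector = CountCollector::default();
titles_a_to_e.search(&*searcher, &mut count_collector)?;
let num_matches = count_collector.count();
```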
|
||||
@@ -97,27 +135,52 @@ impl Query for RangeQuery {
|
||||
|
||||
fn weight(&self, _searcher: &Searcher) -> Result<Box<Weight>> {
|
||||
Ok(box RangeWeight {
|
||||
range_definition: self.range_definition.clone()
|
||||
field: self.field,
|
||||
left_bound: self.left_bound.clone(),
|
||||
right_bound: self.right_bound.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub struct RangeWeight {
|
||||
range_definition: RangeDefinition
|
||||
field: Field,
|
||||
left_bound: Bound<Vec<u8>>,
|
||||
right_bound: Bound<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl RangeWeight {
|
||||
fn term_range<'a, T>(&self, term_dict: &'a T) -> T::Streamer
|
||||
where
|
||||
T: TermDictionary<'a> + 'a,
|
||||
{
|
||||
use std::collections::Bound::*;
|
||||
let mut term_stream_builder = term_dict.range();
|
||||
term_stream_builder = match &self.left_bound {
|
||||
&Included(ref term_val) => term_stream_builder.ge(term_val),
|
||||
&Excluded(ref term_val) => term_stream_builder.gt(term_val),
|
||||
&Unbounded => term_stream_builder,
|
||||
};
|
||||
term_stream_builder = match &self.right_bound {
|
||||
&Included(ref term_val) => term_stream_builder.le(term_val),
|
||||
&Excluded(ref term_val) => term_stream_builder.lt(term_val),
|
||||
&Unbounded => term_stream_builder,
|
||||
};
|
||||
term_stream_builder.into_stream()
|
||||
}
|
||||
}
|
||||
|
||||
impl Weight for RangeWeight {
|
||||
fn scorer<'a>(&'a self, reader: &'a SegmentReader) -> Result<Box<Scorer + 'a>> {
|
||||
let max_doc = reader.max_doc();
|
||||
let mut doc_bitset = DocBitSet::with_maxdoc(max_doc);
|
||||
let mut doc_bitset = BitSet::with_max_value(max_doc);
|
||||
|
||||
let inverted_index = reader.inverted_index(self.range_definition.field);
|
||||
let inverted_index = reader.inverted_index(self.field);
|
||||
let term_dict = inverted_index.terms();
|
||||
let mut term_range = self.range_definition.term_range(term_dict);
|
||||
let mut term_range = self.term_range(term_dict);
|
||||
while term_range.advance() {
|
||||
let term_info = term_range.value();
|
||||
let mut block_segment_postings = inverted_index.read_block_postings_from_terminfo(term_info,IndexRecordOption::Basic);
|
||||
let mut block_segment_postings = inverted_index
|
||||
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic);
|
||||
while block_segment_postings.advance() {
|
||||
for &doc in block_segment_postings.docs() {
|
||||
doc_bitset.insert(doc);
|
||||
@@ -133,8 +196,45 @@ impl Weight for RangeWeight {
|
||||
mod tests {
|
||||
|
||||
use Index;
|
||||
use schema::{SchemaBuilder, Field, Document, INT_INDEXED};
|
||||
use schema::{Document, Field, SchemaBuilder, INT_INDEXED};
|
||||
use collector::CountCollector;
|
||||
use std::collections::Bound;
|
||||
use query::Query;
|
||||
use Result;
|
||||
use super::RangeQuery;
|
||||
|
||||
#[test]
|
||||
fn test_range_query_simple() {
|
||||
fn run() -> Result<()> {
|
||||
let mut schema_builder = SchemaBuilder::new();
|
||||
let year_field = schema_builder.add_u64_field("year", INT_INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
|
||||
for year in 1950u64..2017u64 {
|
||||
let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
|
||||
for _ in 0..num_docs_within_year {
|
||||
index_writer.add_document(doc!(year_field => year));
|
||||
}
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
index.load_searchers().unwrap();
|
||||
let searcher = index.searcher();
|
||||
|
||||
let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960u64..1970u64);
|
||||
|
||||
// ... or `1960..=1969` if inclusive range is enabled.
|
||||
let mut count_collector = CountCollector::default();
|
||||
docs_in_the_sixties.search(&*searcher, &mut count_collector)?;
|
||||
assert_eq!(count_collector.count(), 2285);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
run().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_query() {
|
||||
@@ -163,43 +263,30 @@ mod tests {
|
||||
}
|
||||
index.load_searchers().unwrap();
|
||||
let searcher = index.searcher();
|
||||
use collector::CountCollector;
|
||||
use schema::Term;
|
||||
use query::Query;
|
||||
use super::{RangeQuery, RangeDefinition};
|
||||
|
||||
let count_multiples = |range: RangeDefinition| {
|
||||
let count_multiples = |range_query: RangeQuery| {
|
||||
let mut count_collector = CountCollector::default();
|
||||
let range_query = RangeQuery::new(range);
|
||||
range_query.search(&*searcher, &mut count_collector).unwrap();
|
||||
range_query
|
||||
.search(&*searcher, &mut count_collector)
|
||||
.unwrap();
|
||||
count_collector.count()
|
||||
};
|
||||
|
||||
assert_eq!(count_multiples(RangeQuery::new_i64(int_field, 10..11)), 9);
|
||||
assert_eq!(
|
||||
count_multiples(RangeDefinition::for_field(int_field)
|
||||
.left_included(Term::from_field_i64(int_field, 10))
|
||||
.right_excluded(Term::from_field_i64(int_field, 11)))
|
||||
, 9
|
||||
count_multiples(RangeQuery::new_i64(
|
||||
int_field,
|
||||
(Bound::Included(10), Bound::Included(11))
|
||||
)),
|
||||
18
|
||||
);
|
||||
assert_eq!(
|
||||
count_multiples(RangeDefinition::for_field(int_field)
|
||||
.left_included(Term::from_field_i64(int_field, 10))
|
||||
.right_included(Term::from_field_i64(int_field, 11)))
|
||||
, 18
|
||||
count_multiples(RangeQuery::new_i64(
|
||||
int_field,
|
||||
(Bound::Excluded(9), Bound::Included(10))
|
||||
)),
|
||||
9
|
||||
);
|
||||
assert_eq!(
|
||||
count_multiples(RangeDefinition::for_field(int_field)
|
||||
.left_excluded(Term::from_field_i64(int_field, 9))
|
||||
.right_included(Term::from_field_i64(int_field, 10)))
|
||||
, 9
|
||||
);
|
||||
assert_eq!(
|
||||
count_multiples(RangeDefinition::for_field(int_field)
|
||||
.left_excluded(Term::from_field_i64(int_field, 9)))
|
||||
, 90
|
||||
);
|
||||
|
||||
assert_eq!(count_multiples(RangeQuery::new_i64(int_field, 9..)), 91);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ use DocId;
|
||||
use Score;
|
||||
use collector::Collector;
|
||||
use postings::SkipResult;
|
||||
use common::DocBitSet;
|
||||
use common::BitSet;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
|
||||
/// Scored set of documents matching a query within a specific segment.
|
||||
@@ -62,19 +62,27 @@ impl Scorer for EmptyScorer {
|
||||
}
|
||||
}
|
||||
|
||||
/// Wraps a `DocSet` and simply returns a constant `Scorer`.
|
||||
/// The `ConstScorer` is useful if you have a `DocSet` but
/// need a `Scorer`.
|
||||
///
|
||||
/// The `ConstScorer`'s constant score can be set
|
||||
/// by calling `.set_score(...)`.
|
||||
pub struct ConstScorer<TDocSet: DocSet> {
|
||||
docset: TDocSet,
|
||||
score: Score
|
||||
score: Score,
|
||||
}
|
||||
|
||||
impl<TDocSet: DocSet> ConstScorer<TDocSet> {
|
||||
/// Creates a new `ConstScorer`.
|
||||
pub fn new(docset: TDocSet) -> ConstScorer<TDocSet> {
|
||||
ConstScorer {
|
||||
docset,
|
||||
score: 1f32
|
||||
score: 1f32,
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the constant score to a different value.
|
||||
pub fn set_score(&mut self, score: Score) {
|
||||
self.score = score;
|
||||
}
|
||||
@@ -101,12 +109,11 @@ impl<TDocSet: DocSet> DocSet for ConstScorer<TDocSet> {
|
||||
self.docset.size_hint()
|
||||
}
|
||||
|
||||
fn to_doc_bitset(&mut self, max_doc: DocId) -> DocBitSet {
|
||||
self.docset.to_doc_bitset(max_doc)
|
||||
fn append_to_bitset(&mut self, bitset: &mut BitSet) {
|
||||
self.docset.append_to_bitset(bitset);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<TDocSet: DocSet> Scorer for ConstScorer<TDocSet> {
|
||||
fn score(&self) -> Score {
|
||||
1f32
|
||||
|
||||
@@ -21,9 +21,7 @@ pub struct Document {
|
||||
|
||||
impl From<Vec<FieldValue>> for Document {
|
||||
fn from(field_values: Vec<FieldValue>) -> Self {
|
||||
Document {
|
||||
field_values
|
||||
}
|
||||
Document { field_values }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,7 +36,6 @@ impl PartialEq for Document {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl Eq for Document {}
|
||||
|
||||
impl Document {
|
||||
@@ -59,14 +56,16 @@ impl Document {
|
||||
|
||||
/// Retains only the fields matching the
/// predicate given in argument.
|
||||
pub fn filter_fields<P: Fn(Field)->bool>(&mut self, predicate: P) {
|
||||
pub fn filter_fields<P: Fn(Field) -> bool>(&mut self, predicate: P) {
|
||||
self.field_values
|
||||
.retain(|field_value| predicate(field_value.field()));
|
||||
}
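A hedged usage sketch for `filter_fields`; `title_field` and the retrieved document are hypothetical and not part of this diff.

```rust
// Keep only the values belonging to one field of a retrieved document.
let mut retrieved_doc = searcher.doc(&doc_address)?;
retrieved_doc.filter_fields(|field| field == title_field);
```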
|
||||
|
||||
/// Adds a facet to the document.
|
||||
pub fn add_facet<F>(&mut self, field: Field, path: F)
|
||||
where Facet: From<F> {
|
||||
where
|
||||
Facet: From<F>,
|
||||
{
|
||||
let facet = Facet::from(path);
|
||||
let value = Value::Facet(facet);
|
||||
self.add(FieldValue::new(field, value));
|
||||
@@ -144,9 +143,7 @@ impl BinarySerializable for Document {
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let num_field_values = VInt::deserialize(reader)?.val() as usize;
|
||||
let field_values = (0..num_field_values)
|
||||
.map(|_| {
|
||||
FieldValue::deserialize(reader)
|
||||
})
|
||||
.map(|_| FieldValue::deserialize(reader))
|
||||
.collect::<io::Result<Vec<FieldValue>>>()?;
|
||||
Ok(Document::from(field_values))
|
||||
}
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
use std::fmt::{self, Display, Debug, Formatter};
|
||||
use std::fmt::{self, Debug, Display, Formatter};
|
||||
use std::str;
|
||||
use std::io::{self, Read, Write};
|
||||
use regex::Regex;
|
||||
use std::borrow::Borrow;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
use std::borrow::Cow;
|
||||
use common::BinarySerializable;
|
||||
|
||||
|
||||
const SLASH_BYTE: u8 = '/' as u8;
|
||||
const ESCAPE_BYTE: u8 = '\\' as u8;
|
||||
const SLASH_BYTE: u8 = b'/';
|
||||
const ESCAPE_BYTE: u8 = b'\\';
|
||||
|
||||
/// BYTE used as a level separation in the binary
|
||||
/// representation of facets.
|
||||
@@ -29,7 +29,6 @@ pub const FACET_SEP_BYTE: u8 = 0u8;
|
||||
pub struct Facet(Vec<u8>);
|
||||
|
||||
impl Facet {
|
||||
|
||||
/// Returns a new instance of the "root facet"
|
||||
/// Equivalent to `/`.
|
||||
pub fn root() -> Facet {
|
||||
@@ -64,8 +63,10 @@ impl Facet {
|
||||
/// Conceptually, if one of the steps of this path
/// contains a `/` or a `\`, it should be escaped
/// using a backslash `\`.
pub fn from_text<'a, T>(path: &'a T) -> Facet
where T: ?Sized + AsRef<str> {
pub fn from_text<T>(path: &T) -> Facet
where
T: ?Sized + AsRef<str>,
{
From::from(path)
}
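A short example of the escaping rule described above, using only `Facet::from_text` as shown in this hunk. In Rust source the backslash itself has to be escaped, hence the doubled `\\`.

```rust
// Two steps, "category" and "sci/fi": the escaped slash stays literal
// instead of starting a new step.
let facet = Facet::from_text("/category/sci\\/fi");
```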
|
||||
|
||||
@@ -74,9 +75,10 @@ impl Facet {
|
||||
///
|
||||
/// The steps are expected to be unescaped.
|
||||
pub fn from_path<Path>(path: Path) -> Facet
|
||||
where
|
||||
Path: IntoIterator,
|
||||
Path::Item: ToString {
|
||||
where
|
||||
Path: IntoIterator,
|
||||
Path::Item: ToString,
|
||||
{
|
||||
let mut facet_bytes: Vec<u8> = Vec::with_capacity(100);
|
||||
let mut step_it = path.into_iter();
|
||||
if let Some(step) = step_it.next() {
|
||||
@@ -93,11 +95,28 @@ impl Facet {
|
||||
pub(crate) fn inner_buffer_mut(&mut self) -> &mut Vec<u8> {
|
||||
&mut self.0
|
||||
}
|
||||
|
||||
/// Returns `true` iff other is a subfacet of `self`.
#[allow(collapsible_if)]
pub fn is_prefix_of(&self, other: &Facet) -> bool {
let self_bytes: &[u8] = self.encoded_bytes();
let other_bytes: &[u8] = other.encoded_bytes();
if self_bytes.len() < other_bytes.len() {
if other_bytes.starts_with(self_bytes) {
return other_bytes[self_bytes.len()] == 0u8;
}
}
false
}
}
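A small example of the prefix test above: the trailing separator check is what distinguishes a real sub-facet from a sibling that merely shares a byte prefix.

```rust
assert!(Facet::from("/category").is_prefix_of(&Facet::from("/category/fiction")));
// "/categories" starts with the same bytes, but the next byte is not the
// facet separator, so it is not a sub-facet.
assert!(!Facet::from("/category").is_prefix_of(&Facet::from("/categories")));
```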
|
||||
|
||||
impl Borrow<[u8]> for Facet {
|
||||
fn borrow(&self) -> &[u8] {
|
||||
self.encoded_bytes()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: ?Sized + AsRef<str>> From<&'a T> for Facet {
|
||||
|
||||
fn from(path_asref: &'a T) -> Facet {
|
||||
#[derive(Copy, Clone)]
|
||||
enum State {
|
||||
@@ -110,9 +129,7 @@ impl<'a, T: ?Sized + AsRef<str>> From<&'a T> for Facet {
|
||||
let path_bytes = path.as_bytes();
|
||||
for &c in &path_bytes[1..] {
|
||||
match (state, c) {
|
||||
(State::Idle, ESCAPE_BYTE) => {
|
||||
state = State::Escaped
|
||||
}
|
||||
(State::Idle, ESCAPE_BYTE) => state = State::Escaped,
|
||||
(State::Idle, SLASH_BYTE) => {
|
||||
facet_encoded.push(FACET_SEP_BYTE);
|
||||
}
|
||||
@@ -160,16 +177,19 @@ fn escape_slashes(s: &str) -> Cow<str> {
|
||||
|
||||
impl Serialize for Facet {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where S: Serializer {
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
serializer.serialize_str(&self.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for Facet {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where
|
||||
D: Deserializer<'de> {
|
||||
<&'de str as Deserialize<'de>>::deserialize(deserializer)
|
||||
.map(Facet::from)
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
<&'de str as Deserialize<'de>>::deserialize(deserializer).map(Facet::from)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -180,7 +200,6 @@ impl Debug for Facet {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -207,7 +226,6 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_facet_debug() {
|
||||
let v = ["first", "second", "third"];
|
||||
@@ -215,4 +233,4 @@ mod tests {
|
||||
assert_eq!(format!("{:?}", facet), "Facet(/first/second/third)");
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,7 +71,7 @@ impl FieldEntry {
|
||||
match self.field_type {
|
||||
FieldType::Str(ref options) => options.get_indexing_options().is_some(),
|
||||
FieldType::U64(ref options) | FieldType::I64(ref options) => options.is_indexed(),
|
||||
FieldType::HierarchicalFacet => true
|
||||
FieldType::HierarchicalFacet => true,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -39,7 +39,7 @@ impl FieldType {
|
||||
FieldType::U64(ref int_options) | FieldType::I64(ref int_options) => {
|
||||
int_options.is_indexed()
|
||||
}
|
||||
FieldType::HierarchicalFacet => true
|
||||
FieldType::HierarchicalFacet => true,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -59,7 +59,7 @@ impl FieldType {
|
||||
None
|
||||
}
|
||||
}
|
||||
FieldType::HierarchicalFacet => Some(IndexRecordOption::Basic)
|
||||
FieldType::HierarchicalFacet => Some(IndexRecordOption::Basic),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -75,9 +75,7 @@ impl FieldType {
|
||||
FieldType::U64(_) | FieldType::I64(_) => Err(ValueParsingError::TypeError(
|
||||
format!("Expected an integer, got {:?}", json),
|
||||
)),
|
||||
FieldType::HierarchicalFacet => {
|
||||
Ok(Value::Facet(Facet::from(field_text)))
|
||||
}
|
||||
FieldType::HierarchicalFacet => Ok(Value::Facet(Facet::from(field_text))),
|
||||
},
|
||||
JsonValue::Number(ref field_val_num) => match *self {
|
||||
FieldType::I64(_) => {
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use std::ops::BitOr;
|
||||
|
||||
|
||||
/// Expresses whether a field is single-valued or multi-valued.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Serialize, Deserialize)]
|
||||
pub enum Cardinality {
|
||||
@@ -10,14 +9,14 @@ pub enum Cardinality {
|
||||
/// The document can have any number of values associated to the document.
|
||||
/// This is more memory and CPU expensive than the SingleValue solution.
|
||||
#[serde(rename = "multi")]
|
||||
MultiValues
|
||||
MultiValues,
|
||||
}
|
||||
|
||||
/// Define how an int field should be handled by tantivy.
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct IntOptions {
|
||||
indexed: bool,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
fast: Option<Cardinality>,
|
||||
stored: bool,
|
||||
}
|
||||
@@ -86,7 +85,6 @@ impl Default for IntOptions {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Shortcut for a u64 fast field.
|
||||
///
|
||||
/// Such a shortcut can be composed as follows `STORED | FAST | INT_INDEXED`
|
||||
@@ -114,7 +112,6 @@ pub const INT_STORED: IntOptions = IntOptions {
|
||||
fast: None,
|
||||
};
|
||||
|
||||
|
||||
impl BitOr for IntOptions {
|
||||
type Output = IntOptions;
|
||||
|
||||
|
||||
@@ -334,8 +334,12 @@ mod tests {
|
||||
#[test]
|
||||
pub fn test_schema_serialization() {
|
||||
let mut schema_builder = SchemaBuilder::default();
|
||||
let count_options = IntOptions::default().set_stored().set_fast(Cardinality::SingleValue);
|
||||
let popularity_options = IntOptions::default().set_stored().set_fast(Cardinality::SingleValue);
|
||||
let count_options = IntOptions::default()
|
||||
.set_stored()
|
||||
.set_fast(Cardinality::SingleValue);
|
||||
let popularity_options = IntOptions::default()
|
||||
.set_stored()
|
||||
.set_fast(Cardinality::SingleValue);
|
||||
schema_builder.add_text_field("title", TEXT);
|
||||
schema_builder.add_text_field("author", STRING);
|
||||
schema_builder.add_u64_field("count", count_options);
|
||||
@@ -399,7 +403,9 @@ mod tests {
|
||||
#[test]
|
||||
pub fn test_document_to_json() {
|
||||
let mut schema_builder = SchemaBuilder::default();
|
||||
let count_options = IntOptions::default().set_stored().set_fast(Cardinality::SingleValue);
|
||||
let count_options = IntOptions::default()
|
||||
.set_stored()
|
||||
.set_fast(Cardinality::SingleValue);
|
||||
schema_builder.add_text_field("title", TEXT);
|
||||
schema_builder.add_text_field("author", STRING);
|
||||
schema_builder.add_u64_field("count", count_options);
|
||||
@@ -418,8 +424,12 @@ mod tests {
|
||||
#[test]
|
||||
pub fn test_parse_document() {
|
||||
let mut schema_builder = SchemaBuilder::default();
|
||||
let count_options = IntOptions::default().set_stored().set_fast(Cardinality::SingleValue);
|
||||
let popularity_options = IntOptions::default().set_stored().set_fast(Cardinality::SingleValue);
|
||||
let count_options = IntOptions::default()
|
||||
.set_stored()
|
||||
.set_fast(Cardinality::SingleValue);
|
||||
let popularity_options = IntOptions::default()
|
||||
.set_stored()
|
||||
.set_fast(Cardinality::SingleValue);
|
||||
let title_field = schema_builder.add_text_field("title", TEXT);
|
||||
let author_field = schema_builder.add_text_field("author", STRING);
|
||||
let count_field = schema_builder.add_u64_field("count", count_options);
|
||||
|
||||
@@ -44,7 +44,7 @@ impl Default for TextOptions {
|
||||
|
||||
/// Configuration defining indexing for a text field.
|
||||
/// It wraps:
|
||||
///
|
||||
///
|
||||
/// * record (See [`IndexRecordOption`](./enum.IndexRecordOption.html))
|
||||
/// * tokenizer
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
|
||||
|
||||
@@ -26,7 +26,7 @@ impl Serialize for Value {
|
||||
Value::Str(ref v) => serializer.serialize_str(v),
|
||||
Value::U64(u) => serializer.serialize_u64(u),
|
||||
Value::I64(u) => serializer.serialize_i64(u),
|
||||
Value::Facet(ref facet) => facet.serialize(serializer)
|
||||
Value::Facet(ref facet) => facet.serialize(serializer),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -178,9 +178,7 @@ mod binary_serialize {
|
||||
let value = i64::deserialize(reader)?;
|
||||
Ok(Value::I64(value))
|
||||
}
|
||||
HIERARCHICAL_FACET_CODE => {
|
||||
Ok(Value::Facet(Facet::deserialize(reader)?))
|
||||
}
|
||||
HIERARCHICAL_FACET_CODE => Ok(Value::Facet(Facet::deserialize(reader)?)),
|
||||
_ => Err(io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!("No field type is associated with code {:?}", type_code),
|
||||
|
||||
@@ -40,7 +40,8 @@ impl StoreReader {
|
||||
|
||||
fn block_offset(&self, doc_id: DocId) -> (DocId, u64) {
|
||||
self.block_index()
|
||||
.seek(doc_id + 1)
|
||||
.seek(doc_id as u64 + 1)
|
||||
.map(|(doc, offset)| (doc as DocId, offset))
|
||||
.unwrap_or((0u32, 0u64))
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use directory::WritePtr;
|
||||
use DocId;
|
||||
use common::{VInt, BinarySerializable};
|
||||
use common::{BinarySerializable, VInt};
|
||||
use std::io::{self, Write};
|
||||
use super::StoreReader;
|
||||
use lz4;
|
||||
@@ -34,7 +34,7 @@ impl StoreWriter {
|
||||
pub fn new(writer: WritePtr) -> StoreWriter {
|
||||
StoreWriter {
|
||||
doc: 0,
|
||||
offset_index_writer: SkipListBuilder::new(3),
|
||||
offset_index_writer: SkipListBuilder::new(4),
|
||||
writer: CountingWriter::wrap(writer),
|
||||
intermediary_buffer: Vec::new(),
|
||||
current_block: Vec::new(),
|
||||
@@ -46,7 +46,7 @@ impl StoreWriter {
|
||||
/// The document id is implicitly the number of times
|
||||
/// this method has been called.
|
||||
///
|
||||
pub fn store<'a>(&mut self, stored_document: &Document) -> io::Result<()> {
|
||||
pub fn store(&mut self, stored_document: &Document) -> io::Result<()> {
|
||||
self.intermediary_buffer.clear();
|
||||
stored_document.serialize(&mut self.intermediary_buffer)?;
|
||||
let doc_num_bytes = self.intermediary_buffer.len();
|
||||
@@ -66,10 +66,8 @@ impl StoreWriter {
|
||||
pub fn stack(&mut self, store_reader: &StoreReader) -> io::Result<()> {
|
||||
if !self.current_block.is_empty() {
|
||||
self.write_and_compress_block()?;
|
||||
self.offset_index_writer.insert(
|
||||
self.doc,
|
||||
&(self.writer.written_bytes() as u64),
|
||||
)?;
|
||||
self.offset_index_writer
|
||||
.insert(self.doc as u64, &(self.writer.written_bytes() as u64))?;
|
||||
}
|
||||
let doc_offset = self.doc;
|
||||
let start_offset = self.writer.written_bytes() as u64;
|
||||
@@ -80,10 +78,9 @@ impl StoreWriter {
|
||||
// concatenate the index of the `store_reader`, after translating
|
||||
// its start doc id and its start file offset.
|
||||
for (next_doc_id, block_addr) in store_reader.block_index() {
|
||||
self.doc = doc_offset + next_doc_id;
|
||||
self.offset_index_writer.insert(
|
||||
self.doc,
|
||||
&(start_offset + block_addr))?;
|
||||
self.doc = doc_offset + next_doc_id as u32;
|
||||
self.offset_index_writer
|
||||
.insert(self.doc as u64, &(start_offset + block_addr))?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -99,7 +96,7 @@ impl StoreWriter {
|
||||
(self.intermediary_buffer.len() as u32).serialize(&mut self.writer)?;
|
||||
self.writer.write_all(&self.intermediary_buffer)?;
|
||||
self.offset_index_writer
|
||||
.insert(self.doc, &(self.writer.written_bytes() as u64))?;
|
||||
.insert(self.doc as u64, &(self.writer.written_bytes() as u64))?;
|
||||
self.current_block.clear();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -16,8 +16,10 @@ Keys (`&[u8]`) in this datastructure are sorted.
|
||||
|
||||
mod termdict;
|
||||
mod streamer;
|
||||
mod term_info_store;
|
||||
|
||||
pub use self::termdict::TermDictionaryImpl;
|
||||
pub use self::termdict::TermDictionaryBuilderImpl;
|
||||
pub use self::term_info_store::{TermInfoStore, TermInfoStoreWriter};
|
||||
pub use self::streamer::TermStreamerImpl;
|
||||
pub use self::streamer::TermStreamerBuilderImpl;
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
use fst::{IntoStreamer, Streamer};
|
||||
use fst::map::{StreamBuilder, Stream};
|
||||
use fst::map::{Stream, StreamBuilder};
|
||||
use postings::TermInfo;
|
||||
use super::TermDictionaryImpl;
|
||||
use termdict::{TermOrdinal, TermDictionary, TermStreamerBuilder, TermStreamer};
|
||||
use termdict::{TermDictionary, TermOrdinal, TermStreamer, TermStreamerBuilder};
|
||||
|
||||
/// See [`TermStreamerBuilder`](./trait.TermStreamerBuilder.html)
|
||||
pub struct TermStreamerBuilderImpl<'a> {
|
||||
@@ -53,7 +53,6 @@ impl<'a> TermStreamerBuilder for TermStreamerBuilderImpl<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// See [`TermStreamer`](./trait.TermStreamer.html)
|
||||
pub struct TermStreamerImpl<'a> {
|
||||
fst_map: &'a TermDictionaryImpl,
|
||||
@@ -88,4 +87,3 @@ impl<'a> TermStreamer for TermStreamerImpl<'a> {
|
||||
&self.current_value
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
330
src/termdict/fstdict/term_info_store.rs
Normal file
@@ -0,0 +1,330 @@
|
||||
use std::io;
|
||||
use std::cmp;
|
||||
use std::io::{Read, Write};
|
||||
use postings::TermInfo;
|
||||
use common::{BinarySerializable, FixedSize};
|
||||
use common::compute_num_bits;
|
||||
use common::Endianness;
|
||||
use common::bitpacker::BitPacker;
|
||||
use directory::ReadOnlySource;
|
||||
use termdict::TermOrdinal;
|
||||
use byteorder::ByteOrder;
|
||||
|
||||
const BLOCK_LEN: usize = 256;
|
||||
|
||||
#[derive(Debug, Eq, PartialEq, Default)]
|
||||
struct TermInfoBlockMeta {
|
||||
offset: u64,
|
||||
ref_term_info: TermInfo,
|
||||
doc_freq_nbits: u8,
|
||||
postings_offset_nbits: u8,
|
||||
positions_offset_nbits: u8,
|
||||
}
|
||||
|
||||
impl BinarySerializable for TermInfoBlockMeta {
|
||||
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
|
||||
self.offset.serialize(write)?;
|
||||
self.ref_term_info.serialize(write)?;
|
||||
write.write_all(&[
|
||||
self.doc_freq_nbits,
|
||||
self.postings_offset_nbits,
|
||||
self.positions_offset_nbits,
|
||||
])?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let offset = u64::deserialize(reader)?;
|
||||
let ref_term_info = TermInfo::deserialize(reader)?;
|
||||
let mut buffer = [0u8; 3];
|
||||
reader.read_exact(&mut buffer)?;
|
||||
Ok(TermInfoBlockMeta {
|
||||
offset,
|
||||
ref_term_info,
|
||||
doc_freq_nbits: buffer[0],
|
||||
postings_offset_nbits: buffer[1],
|
||||
positions_offset_nbits: buffer[2],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl FixedSize for TermInfoBlockMeta {
|
||||
const SIZE_IN_BYTES: usize =
|
||||
u64::SIZE_IN_BYTES + TermInfo::SIZE_IN_BYTES + 3 * u8::SIZE_IN_BYTES;
|
||||
}
|
||||
|
||||
impl TermInfoBlockMeta {
|
||||
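// Total number of bits used to encode a single `TermInfo` in this block:
// doc_freq + postings offset + positions offset, plus 7 bits for the
// positions inner offset.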
fn num_bits(&self) -> u8 {
|
||||
self.doc_freq_nbits + self.postings_offset_nbits + self.positions_offset_nbits + 7
|
||||
}
|
||||
|
||||
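// Decodes the `inner_offset`-th bit-packed entry of the block and adds the
// block's reference offsets back to recover the absolute `TermInfo`.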
fn deserialize_term_info(&self, data: &[u8], inner_offset: usize) -> TermInfo {
|
||||
let num_bits = self.num_bits() as usize;
|
||||
let mut cursor = num_bits * inner_offset;
|
||||
|
||||
let doc_freq = extract_bits(data, cursor, self.doc_freq_nbits) as u32;
|
||||
cursor += self.doc_freq_nbits as usize;
|
||||
|
||||
let postings_offset = extract_bits(data, cursor, self.postings_offset_nbits);
|
||||
cursor += self.postings_offset_nbits as usize;
|
||||
|
||||
let positions_offset = extract_bits(data, cursor, self.positions_offset_nbits);
|
||||
cursor += self.positions_offset_nbits as usize;
|
||||
|
||||
let positions_inner_offset = extract_bits(data, cursor, 7) as u8;
|
||||
|
||||
TermInfo {
|
||||
doc_freq,
|
||||
postings_offset: postings_offset + self.ref_term_info.postings_offset,
|
||||
positions_offset: positions_offset + self.ref_term_info.positions_offset,
|
||||
positions_inner_offset,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TermInfoStore {
|
||||
num_terms: usize,
|
||||
block_meta_source: ReadOnlySource,
|
||||
term_info_source: ReadOnlySource,
|
||||
}
|
||||
|
||||
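// Extracts `num_bits` bits starting at the absolute bit address `addr_bits`.
// The implementation reads 8 bytes at once from the containing byte, which is
// why `num_bits` is restricted to at most 56.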
fn extract_bits(data: &[u8], addr_bits: usize, num_bits: u8) -> u64 {
|
||||
assert!(num_bits <= 56);
|
||||
let addr_byte = addr_bits / 8;
|
||||
let bit_shift = (addr_bits % 8) as u64;
|
||||
let val_unshifted_unmasked: u64 = unsafe { *(data[addr_byte..].as_ptr() as *const u64) };
|
||||
let val_shifted_unmasked = val_unshifted_unmasked >> bit_shift;
|
||||
let mask = (1u64 << (num_bits as u64)) - 1;
|
||||
val_shifted_unmasked & mask
|
||||
}
|
||||
|
||||
impl TermInfoStore {
|
||||
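// Layout (written by `TermInfoStoreWriter::serialize`): 8 bytes for the length
// of the block-metadata section, 8 bytes for the number of terms, then the
// block metadata followed by the bit-packed term infos. The writer appends
// 7 trailing zero bytes so that the 8-byte reads in `extract_bits` stay in bounds.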
pub fn open(data: ReadOnlySource) -> TermInfoStore {
|
||||
let buffer = data.as_slice();
|
||||
let len = Endianness::read_u64(&buffer[0..8]) as usize;
|
||||
let num_terms = Endianness::read_u64(&buffer[8..16]) as usize;
|
||||
let block_meta_source = data.slice(16, 16 + len);
|
||||
let term_info_source = data.slice_from(16 + len);
|
||||
TermInfoStore {
|
||||
num_terms,
|
||||
block_meta_source,
|
||||
term_info_source,
|
||||
}
|
||||
}
|
||||
|
||||
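// Looks up a `TermInfo` by ordinal: find its block (BLOCK_LEN terms per block);
// the first ordinal of a block maps to the reference `TermInfo` stored in the
// block metadata, the others are decoded from the bit-packed entries.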
pub fn get(&self, term_ord: TermOrdinal) -> TermInfo {
|
||||
let block_id = (term_ord as usize) / BLOCK_LEN;
|
||||
let buffer = self.block_meta_source.as_slice();
|
||||
let mut block_data: &[u8] = &buffer[block_id * TermInfoBlockMeta::SIZE_IN_BYTES..];
|
||||
let term_info_block_data = TermInfoBlockMeta::deserialize(&mut block_data)
|
||||
.expect("Failed to deserialize terminfoblockmeta");
|
||||
let inner_offset = (term_ord as usize) % BLOCK_LEN;
|
||||
if inner_offset == 0 {
|
||||
term_info_block_data.ref_term_info
|
||||
} else {
|
||||
let term_info_data = self.term_info_source.as_slice();
|
||||
term_info_block_data.deserialize_term_info(
|
||||
&term_info_data[term_info_block_data.offset as usize..],
|
||||
inner_offset - 1,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn num_terms(&self) -> usize {
|
||||
self.num_terms
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TermInfoStoreWriter {
|
||||
buffer_block_metas: Vec<u8>,
|
||||
buffer_term_infos: Vec<u8>,
|
||||
term_infos: Vec<TermInfo>,
|
||||
num_terms: u64,
|
||||
}
|
||||
|
||||
fn bitpack_serialize<W: Write>(
|
||||
write: &mut W,
|
||||
bit_packer: &mut BitPacker,
|
||||
term_info_block_meta: &TermInfoBlockMeta,
|
||||
term_info: &TermInfo,
|
||||
) -> io::Result<()> {
|
||||
bit_packer.write(
|
||||
term_info.doc_freq as u64,
|
||||
term_info_block_meta.doc_freq_nbits,
|
||||
write,
|
||||
)?;
|
||||
bit_packer.write(
|
||||
term_info.postings_offset,
|
||||
term_info_block_meta.postings_offset_nbits,
|
||||
write,
|
||||
)?;
|
||||
bit_packer.write(
|
||||
term_info.positions_offset,
|
||||
term_info_block_meta.positions_offset_nbits,
|
||||
write,
|
||||
)?;
|
||||
bit_packer.write(term_info.positions_inner_offset as u64, 7, write)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl TermInfoStoreWriter {
|
||||
pub fn new() -> TermInfoStoreWriter {
|
||||
TermInfoStoreWriter {
|
||||
buffer_block_metas: Vec::new(),
|
||||
buffer_term_infos: Vec::new(),
|
||||
term_infos: Vec::with_capacity(BLOCK_LEN),
|
||||
num_terms: 0u64,
|
||||
}
|
||||
}
|
||||
|
||||
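// Flushes up to BLOCK_LEN buffered `TermInfo`s as one block: the first entry is
// stored verbatim as the block's reference, the remaining entries have their
// offsets rewritten relative to it and are then bit-packed.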
fn flush_block(&mut self) -> io::Result<()> {
|
||||
if self.term_infos.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
let mut bit_packer = BitPacker::new();
|
||||
let ref_term_info = self.term_infos[0].clone();
|
||||
for term_info in &mut self.term_infos[1..] {
|
||||
term_info.postings_offset -= ref_term_info.postings_offset;
|
||||
term_info.positions_offset -= ref_term_info.positions_offset;
|
||||
}
|
||||
|
||||
let mut max_doc_freq: u32 = 0u32;
|
||||
let mut max_postings_offset: u64 = 0u64;
|
||||
let mut max_positions_offset: u64 = 0u64;
|
||||
for term_info in &self.term_infos[1..] {
|
||||
max_doc_freq = cmp::max(max_doc_freq, term_info.doc_freq);
|
||||
max_postings_offset = cmp::max(max_postings_offset, term_info.postings_offset);
|
||||
max_positions_offset = cmp::max(max_positions_offset, term_info.positions_offset);
|
||||
}
|
||||
|
||||
let max_doc_freq_nbits: u8 = compute_num_bits(max_doc_freq as u64);
|
||||
let max_postings_offset_nbits = compute_num_bits(max_postings_offset);
|
||||
let max_positions_offset_nbits = compute_num_bits(max_positions_offset);
|
||||
|
||||
let term_info_block_meta = TermInfoBlockMeta {
|
||||
offset: self.buffer_term_infos.len() as u64,
|
||||
ref_term_info,
|
||||
doc_freq_nbits: max_doc_freq_nbits,
|
||||
postings_offset_nbits: max_postings_offset_nbits,
|
||||
positions_offset_nbits: max_positions_offset_nbits,
|
||||
};
|
||||
|
||||
term_info_block_meta.serialize(&mut self.buffer_block_metas)?;
|
||||
for term_info in self.term_infos[1..].iter().cloned() {
|
||||
bitpack_serialize(
|
||||
&mut self.buffer_term_infos,
|
||||
&mut bit_packer,
|
||||
&term_info_block_meta,
|
||||
&term_info,
|
||||
)?;
|
||||
}
|
||||
|
||||
// Blocks need to end at a byte boundary.
|
||||
bit_packer.flush(&mut self.buffer_term_infos)?;
|
||||
self.term_infos.clear();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn write_term_info(&mut self, term_info: &TermInfo) -> io::Result<()> {
|
||||
self.num_terms += 1u64;
|
||||
self.term_infos.push(term_info.clone());
|
||||
if self.term_infos.len() >= BLOCK_LEN {
|
||||
self.flush_block()?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn serialize<W: io::Write>(&mut self, write: &mut W) -> io::Result<()> {
|
||||
if !self.term_infos.is_empty() {
|
||||
self.flush_block()?;
|
||||
}
|
||||
let len = self.buffer_block_metas.len() as u64;
|
||||
len.serialize(write)?;
|
||||
self.num_terms.serialize(write)?;
|
||||
write.write_all(&self.buffer_block_metas)?;
|
||||
write.write_all(&self.buffer_term_infos)?;
|
||||
write.write_all(&[0u8; 7])?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::extract_bits;
|
||||
use common::bitpacker::BitPacker;
|
||||
use common::BinarySerializable;
|
||||
use super::TermInfoBlockMeta;
|
||||
use super::{TermInfoStore, TermInfoStoreWriter};
|
||||
use directory::ReadOnlySource;
|
||||
use postings::TermInfo;
|
||||
use common::compute_num_bits;
|
||||
use common;
|
||||
|
||||
#[test]
|
||||
fn test_term_info_block() {
|
||||
common::test::fixed_size_test::<TermInfoBlockMeta>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bitpacked() {
|
||||
let mut buffer = Vec::new();
|
||||
let mut bitpack = BitPacker::new();
|
||||
bitpack.write(321u64, 9, &mut buffer).unwrap();
|
||||
assert_eq!(compute_num_bits(321u64), 9);
|
||||
bitpack.write(2u64, 2, &mut buffer).unwrap();
|
||||
assert_eq!(compute_num_bits(2u64), 2);
|
||||
bitpack.write(51, 6, &mut buffer).unwrap();
|
||||
assert_eq!(compute_num_bits(51), 6);
|
||||
bitpack.close(&mut buffer).unwrap();
|
||||
assert_eq!(buffer.len(), 3 + 7);
|
||||
assert_eq!(extract_bits(&buffer[..], 0, 9), 321u64);
|
||||
assert_eq!(extract_bits(&buffer[..], 9, 2), 2u64);
|
||||
assert_eq!(extract_bits(&buffer[..], 11, 6), 51u64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_term_info_block_meta_serialization() {
|
||||
let term_info_block_meta = TermInfoBlockMeta {
|
||||
offset: 2009,
|
||||
ref_term_info: TermInfo {
|
||||
doc_freq: 512,
|
||||
postings_offset: 51,
|
||||
positions_offset: 3584,
|
||||
positions_inner_offset: 0,
|
||||
},
|
||||
doc_freq_nbits: 10,
|
||||
postings_offset_nbits: 5,
|
||||
positions_offset_nbits: 11,
|
||||
};
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
term_info_block_meta.serialize(&mut buffer).unwrap();
|
||||
let mut cursor: &[u8] = &buffer[..];
|
||||
let term_info_block_meta_serde = TermInfoBlockMeta::deserialize(&mut cursor).unwrap();
|
||||
assert_eq!(term_info_block_meta_serde, term_info_block_meta);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pack() {
|
||||
let mut store_writer = TermInfoStoreWriter::new();
|
||||
let mut term_infos = vec![];
|
||||
for i in 0..1000 {
|
||||
let term_info = TermInfo {
|
||||
doc_freq: i as u32,
|
||||
postings_offset: (i / 10) as u64,
|
||||
positions_offset: (i * 7) as u64,
|
||||
positions_inner_offset: (i % 128) as u8,
|
||||
};
|
||||
store_writer.write_term_info(&term_info).unwrap();
|
||||
term_infos.push(term_info);
|
||||
}
|
||||
let mut buffer = Vec::new();
|
||||
store_writer.serialize(&mut buffer).unwrap();
|
||||
let term_info_store = TermInfoStore::open(ReadOnlySource::from(buffer));
|
||||
for i in 0..1000 {
|
||||
assert_eq!(term_info_store.get(i as u64), term_infos[i]);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -3,10 +3,11 @@ use fst;
|
||||
use fst::raw::Fst;
|
||||
use directory::ReadOnlySource;
|
||||
use common::BinarySerializable;
|
||||
use common::CountingWriter;
|
||||
use schema::FieldType;
|
||||
use postings::TermInfo;
|
||||
use termdict::{TermDictionary, TermDictionaryBuilder, TermOrdinal};
|
||||
use super::{TermStreamerImpl, TermStreamerBuilderImpl};
|
||||
use super::{TermInfoStore, TermInfoStoreWriter, TermStreamerBuilderImpl, TermStreamerImpl};
|
||||
|
||||
fn convert_fst_error(e: fst::Error) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::Other, e)
|
||||
@@ -15,7 +16,7 @@ fn convert_fst_error(e: fst::Error) -> io::Error {
|
||||
/// See [`TermDictionaryBuilder`](./trait.TermDictionaryBuilder.html)
|
||||
pub struct TermDictionaryBuilderImpl<W> {
|
||||
fst_builder: fst::MapBuilder<W>,
|
||||
data: Vec<u8>,
|
||||
term_info_store_writer: TermInfoStoreWriter,
|
||||
term_ord: u64,
|
||||
}
|
||||
|
||||
@@ -41,8 +42,8 @@ where
|
||||
/// # Warning
|
||||
///
|
||||
/// Horribly dangerous internal API. See `.insert_key(...)`.
|
||||
pub(crate) fn insert_value(&mut self, value: &TermInfo) -> io::Result<()> {
|
||||
value.serialize(&mut self.data)?;
|
||||
pub(crate) fn insert_value(&mut self, term_info: &TermInfo) -> io::Result<()> {
|
||||
self.term_info_store_writer.write_term_info(term_info)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -54,25 +55,28 @@ where
|
||||
fn new(w: W, _field_type: FieldType) -> io::Result<Self> {
|
||||
let fst_builder = fst::MapBuilder::new(w).map_err(convert_fst_error)?;
|
||||
Ok(TermDictionaryBuilderImpl {
|
||||
fst_builder: fst_builder,
|
||||
data: Vec::new(),
|
||||
fst_builder,
|
||||
term_info_store_writer: TermInfoStoreWriter::new(),
|
||||
term_ord: 0,
|
||||
})
|
||||
}
|
||||
|
||||
fn insert<K: AsRef<[u8]>>(&mut self, key_ref: K, value: &TermInfo) -> io::Result<()> {
|
||||
let key = key_ref.as_ref();
|
||||
self.insert_key(key.as_ref())?;
|
||||
self.insert_key(key)?;
|
||||
self.insert_value(value)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn finish(self) -> io::Result<W> {
|
||||
fn finish(mut self) -> io::Result<W> {
|
||||
let mut file = self.fst_builder.into_inner().map_err(convert_fst_error)?;
|
||||
let footer_size = self.data.len() as u32;
|
||||
file.write_all(&self.data)?;
|
||||
(footer_size as u32).serialize(&mut file)?;
|
||||
file.flush()?;
|
||||
{
|
||||
let mut counting_writer = CountingWriter::wrap(&mut file);
|
||||
self.term_info_store_writer.serialize(&mut counting_writer)?;
|
||||
let footer_size = counting_writer.written_bytes();
|
||||
(footer_size as u64).serialize(&mut counting_writer)?;
|
||||
counting_writer.flush()?;
|
||||
}
|
||||
Ok(file)
|
||||
}
|
||||
}
|
||||
@@ -92,10 +96,9 @@ fn open_fst_index(source: ReadOnlySource) -> fst::Map {
|
||||
/// See [`TermDictionary`](./trait.TermDictionary.html)
|
||||
pub struct TermDictionaryImpl {
|
||||
fst_index: fst::Map,
|
||||
values_mmap: ReadOnlySource,
|
||||
term_info_store: TermInfoStore,
|
||||
}
|
||||
|
||||
|
||||
impl<'a> TermDictionary<'a> for TermDictionaryImpl {
|
||||
type Streamer = TermStreamerImpl<'a>;
|
||||
|
||||
@@ -103,23 +106,26 @@ impl<'a> TermDictionary<'a> for TermDictionaryImpl {
|
||||
|
||||
fn from_source(source: ReadOnlySource) -> Self {
|
||||
let total_len = source.len();
|
||||
let length_offset = total_len - 4;
|
||||
let length_offset = total_len - 8;
|
||||
let mut split_len_buffer: &[u8] = &source.as_slice()[length_offset..];
|
||||
let footer_size = u32::deserialize(&mut split_len_buffer).expect(
|
||||
"Deserializing 4 bytes should always work",
|
||||
) as usize;
|
||||
let footer_size = u64::deserialize(&mut split_len_buffer)
|
||||
.expect("Deserializing 8 bytes should always work") as usize;
|
||||
let split_len = length_offset - footer_size;
|
||||
let fst_source = source.slice(0, split_len);
|
||||
let values_source = source.slice(split_len, length_offset);
|
||||
let fst_index = open_fst_index(fst_source);
|
||||
TermDictionaryImpl {
|
||||
fst_index: fst_index,
|
||||
values_mmap: values_source,
|
||||
fst_index,
|
||||
term_info_store: TermInfoStore::open(values_source),
|
||||
}
|
||||
}
|
||||
|
||||
fn num_terms(&self) -> usize {
|
||||
self.values_mmap.len() / TermInfo::SIZE_IN_BYTES
|
||||
self.term_info_store.num_terms()
|
||||
}
|
||||
|
||||
fn term_ord<K: AsRef<[u8]>>(&self, key: K) -> Option<TermOrdinal> {
|
||||
self.fst_index.get(key)
|
||||
}
|
||||
|
||||
fn ord_to_term(&self, mut ord: TermOrdinal, bytes: &mut Vec<u8>) -> bool {
|
||||
@@ -128,32 +134,22 @@ impl<'a> TermDictionary<'a> for TermDictionaryImpl {
|
||||
let mut node = fst.root();
|
||||
while ord != 0 || !node.is_final() {
|
||||
if let Some(transition) = node.transitions()
|
||||
.take_while(|transition| {
|
||||
transition.out.value() <= ord
|
||||
})
|
||||
.last() {
|
||||
.take_while(|transition| transition.out.value() <= ord)
|
||||
.last()
|
||||
{
|
||||
ord -= transition.out.value();
|
||||
bytes.push(transition.inp);
|
||||
let new_node_addr = transition.addr;
|
||||
node = fst.node(new_node_addr);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
fn term_ord<K: AsRef<[u8]>>(&self, key: K) -> Option<TermOrdinal> {
|
||||
self.fst_index.get(key)
|
||||
true
|
||||
}
|
||||
|
||||
fn term_info_from_ord(&self, term_ord: TermOrdinal) -> TermInfo {
|
||||
let buffer = self.values_mmap.as_slice();
|
||||
let offset = term_ord as usize * TermInfo::SIZE_IN_BYTES;
|
||||
let mut cursor = &buffer[offset..];
|
||||
TermInfo::deserialize(&mut cursor)
|
||||
.expect("The fst is corrupted. Failed to deserialize a value.")
|
||||
self.term_info_store.get(term_ord)
|
||||
}
|
||||
|
||||
fn get<K: AsRef<[u8]>>(&self, key: K) -> Option<TermInfo> {
|
||||
|
||||
@@ -5,7 +5,6 @@ that serves as an address in their respective posting list.
|
||||
|
||||
The term dictionary API makes it possible to iterate through
|
||||
a range of keys in a sorted manner.
|
||||
```
|
||||
|
||||
|
||||
# Implementations
|
||||
@@ -48,34 +47,30 @@ followed by a streaming through at most `1024` elements in the
|
||||
term `stream`.
|
||||
*/
|
||||
|
||||
use schema::{Term, Field, FieldType};
|
||||
use schema::{Field, FieldType, Term};
|
||||
use directory::ReadOnlySource;
|
||||
use postings::TermInfo;
|
||||
|
||||
|
||||
/// Position of the term in the sorted list of terms.
|
||||
pub type TermOrdinal = u64;
|
||||
|
||||
|
||||
pub use self::merger::TermMerger;
|
||||
|
||||
#[cfg(not(feature = "streamdict"))]
|
||||
mod fstdict;
|
||||
#[cfg(not(feature = "streamdict"))]
|
||||
pub use self::fstdict::{TermDictionaryImpl, TermDictionaryBuilderImpl, TermStreamerImpl,
|
||||
TermStreamerBuilderImpl};
|
||||
|
||||
pub use self::fstdict::{TermDictionaryBuilderImpl, TermDictionaryImpl, TermStreamerBuilderImpl,
|
||||
TermStreamerImpl};
|
||||
|
||||
#[cfg(feature = "streamdict")]
|
||||
mod streamdict;
|
||||
#[cfg(feature = "streamdict")]
|
||||
pub use self::streamdict::{TermDictionaryImpl, TermDictionaryBuilderImpl, TermStreamerImpl,
|
||||
TermStreamerBuilderImpl};
|
||||
pub use self::streamdict::{TermDictionaryBuilderImpl, TermDictionaryImpl, TermStreamerBuilderImpl,
|
||||
TermStreamerImpl};
|
||||
|
||||
mod merger;
|
||||
use std::io;
|
||||
|
||||
|
||||
/// Dictionary associating sorted `&[u8]` to values
|
||||
pub trait TermDictionary<'a>
|
||||
where
|
||||
@@ -90,6 +85,10 @@ where
|
||||
/// Opens a `TermDictionary` given a data source.
|
||||
fn from_source(source: ReadOnlySource) -> Self;
|
||||
|
||||
/// Returns the number of terms in the dictionary.
|
||||
/// Term ordinals range from 0 to `num_terms() - 1`.
|
||||
fn num_terms(&self) -> usize;
|
||||
|
||||
/// Returns the ordinal associated to a given term.
|
||||
fn term_ord<K: AsRef<[u8]>>(&self, term: K) -> Option<TermOrdinal>;
|
||||
|
||||
@@ -107,10 +106,6 @@ where
|
||||
/// Returns the `TermInfo` associated with the given term ordinal.
|
||||
fn term_info_from_ord(&self, term_ord: TermOrdinal) -> TermInfo;
|
||||
|
||||
/// Returns the number of terms in the dictionary.
|
||||
/// Term ordinals range from 0 to `num_terms() - 1`.
|
||||
fn num_terms(&self) -> usize;
|
||||
|
||||
/// Lookups the value corresponding to the key.
|
||||
fn get<K: AsRef<[u8]>>(&self, target_key: K) -> Option<TermInfo>;
|
||||
|
||||
@@ -154,7 +149,6 @@ where
|
||||
fn finish(self) -> io::Result<W>;
|
||||
}
|
||||
|
||||
|
||||
/// `TermStreamer` acts as a cursor over a range of terms of a segment.
|
||||
/// Terms are guaranteed to be sorted.
|
||||
pub trait TermStreamer: Sized {
|
||||
@@ -202,7 +196,6 @@ pub trait TermStreamer: Sized {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// `TermStreamerBuilder` is a helper object used to define
|
||||
/// a range of terms that should be streamed.
|
||||
pub trait TermStreamerBuilder {
|
||||
@@ -226,13 +219,12 @@ pub trait TermStreamerBuilder {
|
||||
fn into_stream(self) -> Self::Streamer;
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{TermDictionaryImpl, TermDictionaryBuilderImpl, TermStreamerImpl};
|
||||
use directory::{RAMDirectory, Directory, ReadOnlySource};
|
||||
use super::{TermDictionaryBuilderImpl, TermDictionaryImpl, TermStreamerImpl};
|
||||
use directory::{Directory, RAMDirectory, ReadOnlySource};
|
||||
use std::path::PathBuf;
|
||||
use schema::{FieldType, SchemaBuilder, Document, TEXT};
|
||||
use schema::{Document, FieldType, SchemaBuilder, TEXT};
|
||||
use core::Index;
|
||||
use std::str;
|
||||
use termdict::TermStreamer;
|
||||
@@ -243,17 +235,15 @@ mod tests {
|
||||
|
||||
const BLOCK_SIZE: usize = 1_500;
|
||||
|
||||
|
||||
fn make_term_info(val: u64) -> TermInfo {
|
||||
TermInfo {
|
||||
doc_freq: val as u32,
|
||||
positions_offset: val * 2u64,
|
||||
positions_offset: val * 2u64,
|
||||
postings_offset: val * 3u64,
|
||||
positions_inner_offset: 5u8,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_term_ordinals() {
|
||||
const COUNTRIES: [&'static str; 7] = [
|
||||
@@ -263,15 +253,15 @@ mod tests {
|
||||
"Slovenia",
|
||||
"Spain",
|
||||
"Sweden",
|
||||
"Switzerland"
|
||||
"Switzerland",
|
||||
];
|
||||
let mut directory = RAMDirectory::create();
|
||||
let path = PathBuf::from("TermDictionary");
|
||||
{
|
||||
let write = directory.open_write(&path).unwrap();
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let mut term_dictionary_builder = TermDictionaryBuilderImpl::new(write, field_type)
|
||||
.unwrap();
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilderImpl::new(write, field_type).unwrap();
|
||||
for term in COUNTRIES.iter() {
|
||||
term_dictionary_builder
|
||||
.insert(term.as_bytes(), &make_term_info(0u64))
|
||||
@@ -283,7 +273,7 @@ mod tests {
|
||||
let term_dict: TermDictionaryImpl = TermDictionaryImpl::from_source(source);
|
||||
for (term_ord, term) in COUNTRIES.iter().enumerate() {
|
||||
assert_eq!(term_dict.term_ord(term).unwrap(), term_ord as u64);
|
||||
let mut bytes = vec!();
|
||||
let mut bytes = vec![];
|
||||
assert!(term_dict.ord_to_term(term_ord as u64, &mut bytes));
|
||||
assert_eq!(bytes, term.as_bytes());
|
||||
}
|
||||
@@ -296,8 +286,8 @@ mod tests {
|
||||
{
|
||||
let write = directory.open_write(&path).unwrap();
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let mut term_dictionary_builder = TermDictionaryBuilderImpl::new(write, field_type)
|
||||
.unwrap();
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilderImpl::new(write, field_type).unwrap();
|
||||
term_dictionary_builder
|
||||
.insert("abc".as_bytes(), &make_term_info(34u64))
|
||||
.unwrap();
|
||||
@@ -377,7 +367,6 @@ mod tests {
|
||||
assert_eq!(&*term_string, "abcdef");
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_term_dictionary_stream() {
|
||||
let ids: Vec<_> = (0u32..10_000u32)
|
||||
@@ -385,8 +374,8 @@ mod tests {
|
||||
.collect();
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let buffer: Vec<u8> = {
|
||||
let mut term_dictionary_builder = TermDictionaryBuilderImpl::new(vec![], field_type)
|
||||
.unwrap();
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilderImpl::new(vec![], field_type).unwrap();
|
||||
for &(ref id, ref i) in &ids {
|
||||
term_dictionary_builder
|
||||
.insert(id.as_bytes(), &make_term_info(*i as u64))
|
||||
@@ -411,13 +400,12 @@ mod tests {
|
||||
term_dictionary.get(key.as_bytes());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_stream_high_range_prefix_suffix() {
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let buffer: Vec<u8> = {
|
||||
let mut term_dictionary_builder = TermDictionaryBuilderImpl::new(vec![], field_type)
|
||||
.unwrap();
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilderImpl::new(vec![], field_type).unwrap();
|
||||
// term requires more than 16bits
|
||||
term_dictionary_builder
|
||||
.insert("abcdefghijklmnopqrstuvwxy", &make_term_info(1))
|
||||
@@ -451,8 +439,8 @@ mod tests {
|
||||
.collect();
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let buffer: Vec<u8> = {
|
||||
let mut term_dictionary_builder = TermDictionaryBuilderImpl::new(vec![], field_type)
|
||||
.unwrap();
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilderImpl::new(vec![], field_type).unwrap();
|
||||
for &(ref id, ref i) in &ids {
|
||||
term_dictionary_builder
|
||||
.insert(id.as_bytes(), &make_term_info(*i as u64))
|
||||
@@ -520,14 +508,15 @@ mod tests {
|
||||
fn test_empty_string() {
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let buffer: Vec<u8> = {
|
||||
let mut term_dictionary_builder = TermDictionaryBuilderImpl::new(vec![], field_type)
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilderImpl::new(vec![], field_type).unwrap();
|
||||
term_dictionary_builder
|
||||
.insert(&[], &make_term_info(1 as u64))
|
||||
.unwrap();
|
||||
term_dictionary_builder
|
||||
.insert(&[], &make_term_info(1 as u64)).unwrap();
|
||||
term_dictionary_builder
|
||||
.insert(&[1u8], &make_term_info(2 as u64)).unwrap();
|
||||
term_dictionary_builder
|
||||
.finish().unwrap()
|
||||
.insert(&[1u8], &make_term_info(2 as u64))
|
||||
.unwrap();
|
||||
term_dictionary_builder.finish().unwrap()
|
||||
};
|
||||
let source = ReadOnlySource::from(buffer);
|
||||
let term_dictionary: TermDictionaryImpl = TermDictionaryImpl::from_source(source);
|
||||
@@ -543,8 +532,8 @@ mod tests {
|
||||
fn test_stream_range_boundaries() {
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let buffer: Vec<u8> = {
|
||||
let mut term_dictionary_builder = TermDictionaryBuilderImpl::new(vec![], field_type)
|
||||
.unwrap();
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilderImpl::new(vec![], field_type).unwrap();
|
||||
for i in 0u8..10u8 {
|
||||
let number_arr = [i; 1];
|
||||
term_dictionary_builder
|
||||
|
||||
@@ -49,7 +49,6 @@ impl TermDeltaDecoder {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// code
|
||||
// first bit represents whether the prefix / suffix len can be encoded
|
||||
// on the same byte. (the next one)
|
||||
@@ -57,18 +56,17 @@ impl TermDeltaDecoder {
|
||||
|
||||
#[inline(always)]
|
||||
pub fn decode<'a>(&mut self, code: u8, mut cursor: &'a [u8]) -> &'a [u8] {
|
||||
let (prefix_len, suffix_len): (usize, usize) =
|
||||
if (code & 1u8) == 1u8 {
|
||||
let b = cursor[0];
|
||||
cursor = &cursor[1..];
|
||||
let prefix_len = (b & 15u8) as usize;
|
||||
let suffix_len = (b >> 4u8) as usize;
|
||||
(prefix_len, suffix_len)
|
||||
} else {
|
||||
let prefix_len = u32::deserialize(&mut cursor).unwrap();
|
||||
let suffix_len = u32::deserialize(&mut cursor).unwrap();
|
||||
(prefix_len as usize, suffix_len as usize)
|
||||
};
|
||||
let (prefix_len, suffix_len): (usize, usize) = if (code & 1u8) == 1u8 {
|
||||
let b = cursor[0];
|
||||
cursor = &cursor[1..];
|
||||
let prefix_len = (b & 15u8) as usize;
|
||||
let suffix_len = (b >> 4u8) as usize;
|
||||
(prefix_len, suffix_len)
|
||||
} else {
|
||||
let prefix_len = u32::deserialize(&mut cursor).unwrap();
|
||||
let suffix_len = u32::deserialize(&mut cursor).unwrap();
|
||||
(prefix_len as usize, suffix_len as usize)
|
||||
};
|
||||
unsafe { self.term.set_len(prefix_len) };
|
||||
self.term.extend_from_slice(&(*cursor)[..suffix_len]);
|
||||
&cursor[suffix_len..]
|
||||
|
||||
@@ -6,32 +6,28 @@ use super::{Token, TokenFilter, TokenStream};
|
||||
pub struct AlphaNumOnlyFilter;
|
||||
|
||||
pub struct AlphaNumOnlyFilterStream<TailTokenStream>
|
||||
where TailTokenStream: TokenStream
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
tail: TailTokenStream,
|
||||
}
|
||||
|
||||
|
||||
impl<TailTokenStream> AlphaNumOnlyFilterStream<TailTokenStream>
|
||||
where TailTokenStream: TokenStream
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
fn predicate(&self, token: &Token) -> bool {
|
||||
token.text.chars().all(|c| c.is_ascii_alphanumeric())
|
||||
}
|
||||
|
||||
fn wrap(
|
||||
tail: TailTokenStream,
|
||||
) -> AlphaNumOnlyFilterStream<TailTokenStream> {
|
||||
AlphaNumOnlyFilterStream {
|
||||
tail
|
||||
}
|
||||
fn wrap(tail: TailTokenStream) -> AlphaNumOnlyFilterStream<TailTokenStream> {
|
||||
AlphaNumOnlyFilterStream { tail }
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<TailTokenStream> TokenFilter<TailTokenStream> for AlphaNumOnlyFilter
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
type ResultTokenStream = AlphaNumOnlyFilterStream<TailTokenStream>;
|
||||
|
||||
@@ -41,8 +37,8 @@ impl<TailTokenStream> TokenFilter<TailTokenStream> for AlphaNumOnlyFilter
|
||||
}
|
||||
|
||||
impl<TailTokenStream> TokenStream for AlphaNumOnlyFilterStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
fn token(&self) -> &Token {
|
||||
self.tail.token()
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
use super::{Token, Tokenizer, TokenStream};
|
||||
use super::{Token, TokenStream, Tokenizer};
|
||||
use std::str;
|
||||
use schema::FACET_SEP_BYTE;
|
||||
|
||||
|
||||
/// The `FacetTokenizer` processes a `Facet` binary representation
|
||||
/// and emits a token for all of its parents.
|
||||
///
|
||||
@@ -39,27 +38,27 @@ impl<'a> Tokenizer<'a> for FacetTokenizer {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<'a> TokenStream for FacetTokenStream<'a> {
|
||||
fn advance(&mut self) -> bool {
|
||||
match self.state {
|
||||
State::RootFacetNotEmitted => {
|
||||
self.state =
|
||||
if self.text.is_empty() {
|
||||
State::Terminated
|
||||
} else {
|
||||
State::UpToPosition(0)
|
||||
};
|
||||
self.state = if self.text.is_empty() {
|
||||
State::Terminated
|
||||
} else {
|
||||
State::UpToPosition(0)
|
||||
};
|
||||
true
|
||||
}
|
||||
State::UpToPosition(cursor) => {
|
||||
let bytes: &[u8] = self.text.as_bytes();
|
||||
if let Some(next_sep_pos) = bytes[cursor+1..]
|
||||
if let Some(next_sep_pos) = bytes[cursor + 1..]
|
||||
.iter()
|
||||
.cloned()
|
||||
.position(|b| b == FACET_SEP_BYTE)
|
||||
.map(|pos| cursor + 1 + pos) {
|
||||
let facet_part = unsafe { str::from_utf8_unchecked(&bytes[cursor..next_sep_pos]) };
|
||||
.map(|pos| cursor + 1 + pos)
|
||||
{
|
||||
let facet_part =
|
||||
unsafe { str::from_utf8_unchecked(&bytes[cursor..next_sep_pos]) };
|
||||
self.token.text.push_str(facet_part);
|
||||
self.state = State::UpToPosition(next_sep_pos);
|
||||
} else {
|
||||
@@ -69,9 +68,7 @@ impl<'a> TokenStream for FacetTokenStream<'a> {
|
||||
}
|
||||
true
|
||||
}
|
||||
State::Terminated => {
|
||||
false
|
||||
}
|
||||
State::Terminated => false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,9 +84,10 @@ impl<'a> TokenStream for FacetTokenStream<'a> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use tokenizer::{TokenStream, Token, Tokenizer};
|
||||
use tokenizer::{Token, TokenStream, Tokenizer};
|
||||
use super::FacetTokenizer;
|
||||
use schema::Facet;
|
||||
use std::str;
|
||||
|
||||
#[test]
|
||||
fn test_facet_tokenizer() {
|
||||
@@ -101,7 +99,7 @@ mod tests {
|
||||
tokens.push(format!("{}", facet));
|
||||
};
|
||||
FacetTokenizer
|
||||
.token_stream(unsafe { ::std::str::from_utf8_unchecked(facet.encoded_bytes()) })
|
||||
.token_stream(unsafe { str::from_utf8_unchecked(facet.encoded_bytes()) })
|
||||
.process(&mut add_token);
|
||||
}
|
||||
assert_eq!(tokens.len(), 4);
|
||||
@@ -121,10 +119,10 @@ mod tests {
|
||||
tokens.push(format!("{}", facet));
|
||||
};
|
||||
FacetTokenizer
|
||||
.token_stream(unsafe { ::std::str::from_utf8_unchecked(facet.encoded_bytes()) })
|
||||
.token_stream(unsafe { str::from_utf8_unchecked(facet.encoded_bytes()) })
|
||||
.process(&mut add_token);
|
||||
}
|
||||
assert_eq!(tokens.len(), 1);
|
||||
assert_eq!(tokens[0], "/");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -253,7 +253,6 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Trait for the pluggable components of `Tokenizer`s.
|
||||
pub trait TokenFilter<TailTokenStream: TokenStream>: Clone {
|
||||
/// The resulting `TokenStream` type.
|
||||
|
||||