mirror of https://github.com/quickwit-oss/tantivy.git, synced 2026-01-08 18:12:55 +00:00
blop
@@ -192,6 +192,14 @@ impl SegmentReader {
    }

    pub fn postings_data(&self, offset: usize) -> &[u8] {
        &self.postings_data[offset..]
    }

    pub fn get_block_postings(&self) -> BlockSegmentPostings {
        BlockSegmentPostings::from_data(0, &self.postings_data[..], FreqHandler::new_without_freq())
    }

    pub fn read_block_postings_from_terminfo(&self, term_info: &TermInfo, field_type: &FieldType) -> Option<BlockSegmentPostings> {
        let offset = term_info.postings_offset as usize;
        let postings_data = &self.postings_data[offset..];
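As a rough illustration of the lookup pattern in this hunk, a term's `TermInfo` records the byte offset at which its posting list starts, and the reader simply slices the segment-wide postings buffer from that offset onward. The sketch below is a minimal, self-contained stand-in; the struct layout and method names are hypothetical, not tantivy's actual types.

// Hypothetical, simplified sketch of offset-based postings lookup.
struct TermInfo {
    postings_offset: u32,
}

struct SegmentReaderSketch {
    postings_data: Vec<u8>,
}

impl SegmentReaderSketch {
    // Return the postings bytes starting at `offset`, mirroring `postings_data(offset)` above.
    fn postings_data(&self, offset: usize) -> &[u8] {
        &self.postings_data[offset..]
    }

    // Slice the buffer at the offset stored in the term's TermInfo.
    fn postings_for_term(&self, term_info: &TermInfo) -> &[u8] {
        self.postings_data(term_info.postings_offset as usize)
    }
}

fn main() {
    let reader = SegmentReaderSketch { postings_data: vec![0u8, 1, 2, 3, 4, 5] };
    let term_info = TermInfo { postings_offset: 2 };
    assert_eq!(reader.postings_for_term(&term_info), &[2u8, 3, 4, 5]);
}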
@@ -85,6 +85,10 @@ impl Heap {
    pub fn get_mut_ref<Item>(&self, addr: u32) -> &mut Item {
        self.inner().get_mut_ref(addr)
    }

    pub fn get_ref<Item>(&self, addr: u32) -> &Item {
        self.inner().get_mut_ref(addr)
    }
}
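For context on what `get_ref` / `get_mut_ref` do here: the `Heap` hands out typed references keyed by a `u32` address into an arena filled during indexing. Below is a loose, safe sketch of that addressing idea using a plain `Vec` as backing store; it is an illustration only (tantivy's `Heap` works on raw bytes behind `&self` via interior mutability), and every name is hypothetical.

// A minimal, safe arena that hands out typed references by u32 address.
struct Arena<Item> {
    items: Vec<Item>,
}

impl<Item> Arena<Item> {
    fn new() -> Self {
        Arena { items: Vec::new() }
    }

    // Store an item and return its "address" (here simply its index).
    fn allocate(&mut self, item: Item) -> u32 {
        self.items.push(item);
        (self.items.len() - 1) as u32
    }

    fn get_ref(&self, addr: u32) -> &Item {
        &self.items[addr as usize]
    }

    fn get_mut_ref(&mut self, addr: u32) -> &mut Item {
        &mut self.items[addr as usize]
    }
}

fn main() {
    let mut arena = Arena::new();
    let addr = arena.allocate(42u64);
    *arena.get_mut_ref(addr) += 1;
    assert_eq!(*arena.get_ref(addr), 43);
}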
@@ -17,13 +17,8 @@ fn test_indexing() {

    let mut schema_builder = SchemaBuilder::default();

<<<<<<< HEAD
    let id_field = schema_builder.add_u64_field("id", U64_INDEXED);
    let multiples_field = schema_builder.add_u64_field("multiples", U64_INDEXED);
=======
    let id_field = schema_builder.add_u64_field("id", INT_INDEXED);
    let multiples_field = schema_builder.add_u64_field("multiples", INT_INDEXED);
>>>>>>> master
    let schema = schema_builder.build();

    let index = Index::create_from_tempdir(schema).unwrap();
@@ -98,8 +98,7 @@ impl<'a> SegmentWriter<'a> {
    /// Return true if the term dictionary hashmap is reaching capacity.
    /// It is one of the conditions that triggers a `SegmentWriter` to
    /// be finalized.
    #[doc(hidden)]
    pub fn is_termdic_saturated(&self,) -> bool {
    pub(crate) fn is_termdic_saturated(&self,) -> bool {
        self.multifield_postings.is_termdic_saturated()
    }
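The doc comment above describes term-dictionary saturation as one trigger for finalizing a segment. As a hedged sketch of how such a check might drive an indexing loop (illustrative names and threshold only, not tantivy's actual API):

// Hypothetical saturation check used to decide when to flush a segment.
struct TermDictionary {
    capacity: usize,
    len: usize,
}

impl TermDictionary {
    fn is_saturated(&self) -> bool {
        // Consider the hashmap "saturated" above roughly 90% fill (illustrative threshold).
        self.len * 10 >= self.capacity * 9
    }
}

fn main() {
    let dict = TermDictionary { capacity: 1_000, len: 950 };
    if dict.is_saturated() {
        // In a real indexer this is where the segment would be serialized
        // and a fresh SegmentWriter started.
        println!("term dictionary saturated: finalize the segment");
    }
}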
@@ -195,9 +194,6 @@ fn write<'a>(
        mut serializer: SegmentSerializer) -> Result<()> {

    try!(multifield_postings.serialize(serializer.get_postings_serializer()));
    // for per_field_postings_writer in per_field_postings_writers {
    //     try!(per_field_postings_writer.serialize(serializer.get_postings_serializer(), heap));
    // }
    try!(fast_field_writers.serialize(serializer.get_fast_field_serializer()));
    try!(fieldnorms_writer.serialize(serializer.get_fieldnorms_serializer()));
    try!(serializer.close());
@@ -34,6 +34,16 @@ impl<'a> BlockSegmentPostings<'a> {
        }
    }

    pub fn reset(&mut self, len: usize, data: &'a [u8]) {
        let num_binpacked_blocks: usize = (len as usize) / NUM_DOCS_PER_BLOCK;
        let num_vint_docs = (len as usize) - NUM_DOCS_PER_BLOCK * num_binpacked_blocks;
        self.num_binpacked_blocks = num_binpacked_blocks;
        self.num_vint_docs = num_vint_docs;
        self.remaining_data = data;
        self.doc_offset = 0;
        self.len = len;
    }

    pub fn docs(&self) -> &[DocId] {
        self.block_decoder.output_array()
    }
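`reset` splits the `len` documents of a posting list into as many full bitpacked blocks as fit, plus a variable-int encoded remainder. A small worked example of that arithmetic, assuming a block size of 128 documents for `NUM_DOCS_PER_BLOCK`:

// Worked example of the block/remainder split performed by `reset`,
// under the assumption that NUM_DOCS_PER_BLOCK is 128.
const NUM_DOCS_PER_BLOCK: usize = 128;

fn split(len: usize) -> (usize, usize) {
    let num_binpacked_blocks = len / NUM_DOCS_PER_BLOCK;
    let num_vint_docs = len - NUM_DOCS_PER_BLOCK * num_binpacked_blocks;
    (num_binpacked_blocks, num_vint_docs)
}

fn main() {
    // 300 documents -> 2 full bitpacked blocks of 128 docs each,
    // plus 44 docs encoded as variable-length integers.
    assert_eq!(split(300), (2, 44));
    assert_eq!(split(128), (1, 0));
    assert_eq!(split(5), (0, 5));
}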
@@ -94,6 +104,7 @@ pub struct SegmentPostings<'a> {

impl<'a> SegmentPostings<'a> {

    /// Reads a SegmentPostings from a `&[u8]`.
    ///
    /// * `len` - number of documents in the posting list.