diff --git a/src/common/bitpacker.rs b/src/common/bitpacker.rs index 54d5ccc0d..ab3d59d18 100644 --- a/src/common/bitpacker.rs +++ b/src/common/bitpacker.rs @@ -44,7 +44,7 @@ impl BitPacker { BitPacker { mini_buffer: 0u64, mini_buffer_written: 0, - num_bits: num_bits, + num_bits, } } @@ -107,9 +107,9 @@ where (1u64 << num_bits) - 1u64 }; BitUnpacker { - num_bits: num_bits, - mask: mask, - data: data, + num_bits, + mask, + data, } } diff --git a/src/common/counting_writer.rs b/src/common/counting_writer.rs index d9ea877d2..2a6431b6a 100644 --- a/src/common/counting_writer.rs +++ b/src/common/counting_writer.rs @@ -10,7 +10,7 @@ pub struct CountingWriter { impl CountingWriter { pub fn wrap(underlying: W) -> CountingWriter { CountingWriter { - underlying: underlying, + underlying, written_bytes: 0, } } diff --git a/src/core/index.rs b/src/core/index.rs index 7e0f5f7dc..6bfcbae36 100644 --- a/src/core/index.rs +++ b/src/core/index.rs @@ -91,8 +91,8 @@ impl Index { fn create_from_metas(directory: ManagedDirectory, metas: IndexMeta) -> Result { let schema = metas.schema.clone(); let index = Index { - directory: directory, - schema: schema, + directory, + schema, searcher_pool: Arc::new(Pool::new()), tokenizers: TokenizerManager::default(), }; @@ -218,12 +218,10 @@ impl Index { /// published or after a merge. pub fn load_searchers(&self) -> Result<()> { let searchable_segments = self.searchable_segments()?; - let segment_readers: Vec = try!( - searchable_segments - .into_iter() - .map(SegmentReader::open) - .collect() - ); + let segment_readers: Vec = searchable_segments + .into_iter() + .map(SegmentReader::open) + .collect::>()?; let searchers = (0..NUM_SEARCHERS) .map(|_| Searcher::from(segment_readers.clone())) .collect(); diff --git a/src/core/index_meta.rs b/src/core/index_meta.rs index 6eafddf77..d40198f92 100644 --- a/src/core/index_meta.rs +++ b/src/core/index_meta.rs @@ -6,7 +6,7 @@ use core::SegmentMeta; /// This object is serialized on disk in the `meta.json` file. 
/// It keeps information about /// * the searchable segments, -/// * the index docstamp +/// * the index `docstamp` /// * the schema /// #[derive(Clone, Debug, Serialize, Deserialize)] @@ -20,7 +20,7 @@ impl IndexMeta { pub fn with_schema(schema: Schema) -> IndexMeta { IndexMeta { segments: vec![], - schema: schema, + schema, opstamp: 0u64, } } diff --git a/src/core/inverted_index_reader.rs b/src/core/inverted_index_reader.rs index d4af06c0e..7f828a4dd 100644 --- a/src/core/inverted_index_reader.rs +++ b/src/core/inverted_index_reader.rs @@ -42,10 +42,10 @@ impl InvertedIndexReader { InvertedIndexReader { termdict: TermDictionaryImpl::from_source(termdict_source), - postings_source: postings_source, - positions_source: positions_source, - delete_bitset: delete_bitset, - schema: schema, + postings_source, + positions_source, + delete_bitset, + schema, } } diff --git a/src/core/searcher.rs b/src/core/searcher.rs index 14f1cb141..b47cafd7a 100644 --- a/src/core/searcher.rs +++ b/src/core/searcher.rs @@ -87,7 +87,7 @@ pub struct FieldSearcher { impl FieldSearcher { fn new(inv_index_readers: Vec>) -> FieldSearcher { - FieldSearcher { inv_index_readers: inv_index_readers } + FieldSearcher { inv_index_readers } } @@ -104,7 +104,7 @@ impl FieldSearcher { impl From> for Searcher { fn from(segment_readers: Vec) -> Searcher { - Searcher { segment_readers: segment_readers } + Searcher { segment_readers } } } diff --git a/src/core/segment.rs b/src/core/segment.rs index c4e7e3442..f9bbc02a8 100644 --- a/src/core/segment.rs +++ b/src/core/segment.rs @@ -30,8 +30,8 @@ impl fmt::Debug for Segment { /// The function is here to make it private outside `tantivy`. pub fn create_segment(index: Index, meta: SegmentMeta) -> Segment { Segment { - index: index, - meta: meta, + index, + meta, } } @@ -86,7 +86,7 @@ impl Segment { component: SegmentComponent, ) -> result::Result { let path = self.relative_path(component); - let source = try!(self.index.directory().open_read(&path)); + let source = self.index.directory().open_read(&path)?; Ok(source) } @@ -96,7 +96,7 @@ impl Segment { component: SegmentComponent, ) -> result::Result { let path = self.relative_path(component); - let write = try!(self.index.directory_mut().open_write(&path)); + let write = self.index.directory_mut().open_write(&path)?; Ok(write) } } diff --git a/src/core/segment_meta.rs b/src/core/segment_meta.rs index 1abe95652..d313d116d 100644 --- a/src/core/segment_meta.rs +++ b/src/core/segment_meta.rs @@ -25,7 +25,7 @@ impl SegmentMeta { /// a segment with no deletes and no documents. 
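Several hunks in this patch (`Index::load_searchers`, `BooleanQuery::weight`, `BooleanWeight::scorer`) replace a `try!(...)` wrapped around a `collect()` with `collect::<Result<_, _>>()?`. The pattern works because `Result` implements `FromIterator`: collecting an iterator of `Result`s short-circuits on the first `Err`, and `?` then propagates it to the caller. A minimal, self-contained sketch with hypothetical names, independent of tantivy's types:

```rust
use std::num::ParseIntError;

// Each parse yields a Result; collecting into Result<Vec<_>, _> stops at the
// first Err, and `?` propagates it -- the same behaviour the old
// try!(iter.collect()) form had.
fn parse_all(inputs: &[&str]) -> Result<Vec<u32>, ParseIntError> {
    let parsed = inputs
        .iter()
        .map(|s| s.parse::<u32>())
        .collect::<Result<Vec<u32>, _>>()?;
    Ok(parsed)
}

fn main() {
    assert_eq!(parse_all(&["1", "2", "3"]).unwrap(), vec![1, 2, 3]);
    assert!(parse_all(&["1", "oops", "3"]).is_err());
}
```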
pub fn new(segment_id: SegmentId) -> SegmentMeta { SegmentMeta { - segment_id: segment_id, + segment_id, max_doc: 0, deletes: None, } @@ -109,8 +109,8 @@ impl SegmentMeta { #[doc(hidden)] pub fn set_delete_meta(&mut self, num_deleted_docs: u32, opstamp: u64) { self.deletes = Some(DeleteMeta { - num_deleted_docs: num_deleted_docs, - opstamp: opstamp, + num_deleted_docs, + opstamp, }); } } diff --git a/src/core/segment_reader.rs b/src/core/segment_reader.rs index 6e15146bd..8cd59f271 100644 --- a/src/core/segment_reader.rs +++ b/src/core/segment_reader.rs @@ -158,15 +158,15 @@ impl SegmentReader { Ok(SegmentReader { inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())), segment_meta: segment.meta().clone(), - termdict_composite: termdict_composite, - postings_composite: postings_composite, - fast_fields_composite: fast_fields_composite, - fieldnorms_composite: fieldnorms_composite, + termdict_composite, + postings_composite, + fast_fields_composite, + fieldnorms_composite, segment_id: segment.id(), - store_reader: store_reader, - delete_bitset: delete_bitset, - positions_composite: positions_composite, - schema: schema, + store_reader, + delete_bitset, + positions_composite, + schema, }) } diff --git a/src/datastruct/skip/skiplist.rs b/src/datastruct/skip/skiplist.rs index 3cdfab759..aa5f4efd7 100644 --- a/src/datastruct/skip/skiplist.rs +++ b/src/datastruct/skip/skiplist.rs @@ -32,9 +32,9 @@ impl<'a, T: BinarySerializable> From<&'a [u8]> for Layer<'a, T> { let mut cursor = data; let next_id = u32::deserialize(&mut cursor).unwrap_or(u32::max_value()); Layer { - data: data, - cursor: cursor, - next_id: next_id, + data, + cursor, + next_id, _phantom_: PhantomData, } } @@ -123,8 +123,8 @@ impl<'a, T: BinarySerializable> From<&'a [u8]> for SkipList<'a, T> { .map(|(start, stop)| Layer::from(&layers_data[start..stop])) .collect(); SkipList { - skip_layers: skip_layers, - data_layer: data_layer, + skip_layers, + data_layer, } } } diff --git a/src/datastruct/skip/skiplist_builder.rs b/src/datastruct/skip/skiplist_builder.rs index af665ab3c..2782cd1c9 100644 --- a/src/datastruct/skip/skiplist_builder.rs +++ b/src/datastruct/skip/skiplist_builder.rs @@ -24,7 +24,7 @@ impl LayerBuilder { fn with_period(period: usize) -> LayerBuilder { LayerBuilder { - period: period, + period, buffer: Vec::new(), remaining: period, len: 0, @@ -58,7 +58,7 @@ pub struct SkipListBuilder { impl SkipListBuilder { pub fn new(period: usize) -> SkipListBuilder { SkipListBuilder { - period: period, + period, data_layer: LayerBuilder::with_period(period), skip_layers: Vec::new(), } @@ -74,14 +74,14 @@ impl SkipListBuilder { pub fn insert(&mut self, doc_id: DocId, dest: &T) -> io::Result<()> { let mut layer_id = 0; - let mut skip_pointer = try!(self.data_layer.insert(doc_id, dest)); + let mut skip_pointer = self.data_layer.insert(doc_id, dest)?; loop { skip_pointer = match skip_pointer { Some((skip_doc_id, skip_offset)) => { - try!(self.get_skip_layer(layer_id).insert( + self.get_skip_layer(layer_id).insert( skip_doc_id, &skip_offset, - )) + )? 
} None => { return Ok(()); diff --git a/src/datastruct/stacker/expull.rs b/src/datastruct/stacker/expull.rs index a6bd49097..3a98aa6da 100644 --- a/src/datastruct/stacker/expull.rs +++ b/src/datastruct/stacker/expull.rs @@ -26,7 +26,7 @@ pub struct ExpUnrolledLinkedList { impl ExpUnrolledLinkedList { pub fn iter<'a>(&self, addr: u32, heap: &'a Heap) -> ExpUnrolledLinkedListIterator<'a> { ExpUnrolledLinkedListIterator { - heap: heap, + heap, addr: addr + 2u32 * (mem::size_of::() as u32), len: self.len, consumed: 0, diff --git a/src/datastruct/stacker/hashmap.rs b/src/datastruct/stacker/hashmap.rs index c9054dff2..7254c8ae7 100644 --- a/src/datastruct/stacker/hashmap.rs +++ b/src/datastruct/stacker/hashmap.rs @@ -129,9 +129,9 @@ struct QuadraticProbing { impl QuadraticProbing { fn compute(hash: usize, mask: usize) -> QuadraticProbing { QuadraticProbing { - hash: hash, + hash, i: 0, - mask: mask, + mask, } } @@ -149,7 +149,7 @@ impl<'a> HashMap<'a> { let table: Vec = iter::repeat(KeyValue::default()).take(table_size).collect(); HashMap { table: table.into_boxed_slice(), - heap: heap, + heap, mask: table_size - 1, occupied: Vec::with_capacity(table_size / 2), } @@ -174,7 +174,7 @@ impl<'a> HashMap<'a> { self.occupied.push(bucket); self.table[bucket] = KeyValue { key_value_addr: key_bytes_ref, - hash: hash, + hash, }; } diff --git a/src/datastruct/stacker/heap.rs b/src/datastruct/stacker/heap.rs index 0bfd01fc2..df7ed2026 100644 --- a/src/datastruct/stacker/heap.rs +++ b/src/datastruct/stacker/heap.rs @@ -115,7 +115,7 @@ impl InnerHeap { pub fn with_capacity(num_bytes: usize) -> InnerHeap { let buffer: Vec = vec![0u8; num_bytes]; InnerHeap { - buffer: buffer, + buffer, buffer_len: num_bytes as u32, next_heap: None, used: 0u32, diff --git a/src/directory/error.rs b/src/directory/error.rs index 73424f2e0..1f9c2b835 100644 --- a/src/directory/error.rs +++ b/src/directory/error.rs @@ -33,7 +33,7 @@ impl IOError { pub(crate) fn with_path(path: PathBuf, err: io::Error) -> Self { IOError { path: Some(path), - err: err, + err, } } } @@ -42,7 +42,7 @@ impl From for IOError { fn from(err: io::Error) -> IOError { IOError { path: None, - err: err, + err, } } } diff --git a/src/directory/mmap_directory.rs b/src/directory/mmap_directory.rs index 74edfc51a..d79ee3244 100644 --- a/src/directory/mmap_directory.rs +++ b/src/directory/mmap_directory.rs @@ -166,7 +166,7 @@ impl MmapDirectory { /// This is mostly useful to test the MmapDirectory itself. /// For your unit tests, prefer the RAMDirectory. 
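The `QuadraticProbing` change in `stacker/hashmap.rs` only switches to field-init shorthand, but the struct it touches (a `hash`, a probe counter `i`, and a `mask` equal to `table_size - 1`, the table size being a power of two) is worth a quick illustration. The patch does not show the probe formula itself, so the sketch below uses generic triangular-number probing, which is guaranteed to visit every bucket of a power-of-two table; it is not necessarily tantivy's exact sequence:

```rust
struct Probe {
    hash: usize,
    i: usize,
    mask: usize, // table_size - 1, with table_size a power of two
}

impl Probe {
    fn new(hash: usize, mask: usize) -> Probe {
        Probe { hash, i: 0, mask }
    }
    // Probe positions hash, hash+1, hash+3, hash+6, ... (triangular offsets).
    fn next_probe(&mut self) -> usize {
        self.i += 1;
        (self.hash + self.i * (self.i + 1) / 2) & self.mask
    }
}

fn main() {
    // With a power-of-two table, the triangular sequence hits every slot
    // before it repeats.
    let mask = 7; // table of 8 buckets
    let mut probe = Probe::new(42, mask);
    let mut seen = std::collections::HashSet::new();
    seen.insert(42 & mask);
    for _ in 0..mask {
        seen.insert(probe.next_probe());
    }
    assert_eq!(seen.len(), mask + 1);
}
```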
pub fn create_from_tempdir() -> io::Result { - let tempdir = try!(TempDir::new("index")); + let tempdir = TempDir::new("index")?; let tempdir_path = PathBuf::from(tempdir.path()); let directory = MmapDirectory { root_path: PathBuf::from(tempdir_path), @@ -227,8 +227,8 @@ impl MmapDirectory { ); } - let fd = try!(open_opts.open(&self.root_path)); - try!(fd.sync_all()); + let fd = open_opts.open(&self.root_path)?; + fd.sync_all()?; Ok(()) } /// Returns some statistical information @@ -260,7 +260,7 @@ impl Write for SafeFileWriter { } fn flush(&mut self) -> io::Result<()> { - try!(self.0.flush()); + self.0.flush()?; self.0.sync_all() } } @@ -387,7 +387,7 @@ impl Directory for MmapDirectory { debug!("Atomic Write {:?}", path); let full_path = self.resolve_path(path); let meta_file = atomicwrites::AtomicFile::new(full_path, atomicwrites::AllowOverwrite); - try!(meta_file.write(|f| f.write_all(data))); + meta_file.write(|f| f.write_all(data))?; Ok(()) } diff --git a/src/directory/ram_directory.rs b/src/directory/ram_directory.rs index ca23bc07c..7d2a38375 100644 --- a/src/directory/ram_directory.rs +++ b/src/directory/ram_directory.rs @@ -32,7 +32,7 @@ impl VecWriter { VecWriter { path: path_buf, data: Cursor::new(Vec::new()), - shared_directory: shared_directory, + shared_directory, is_flushed: true, } } @@ -58,16 +58,16 @@ impl Seek for VecWriter { impl Write for VecWriter { fn write(&mut self, buf: &[u8]) -> io::Result { self.is_flushed = false; - try!(self.data.write_all(buf)); + self.data.write_all(buf)?; Ok(buf.len()) } fn flush(&mut self) -> io::Result<()> { self.is_flushed = true; - try!(self.shared_directory.write( + self.shared_directory.write( self.path.clone(), self.data.get_ref(), - )); + )?; Ok(()) } } @@ -83,12 +83,13 @@ impl InnerDirectory { } fn write(&self, path: PathBuf, data: &[u8]) -> io::Result { - let mut map = try!(self.0.write().map_err(|_| { - make_io_err(format!( - "Failed to lock the directory, when trying to write {:?}", - path - )) - })); + let mut map = self.0.write() + .map_err(|_| { + make_io_err(format!( + "Failed to lock the directory, when trying to write {:?}", + path + )) + })?; let prev_value = map.insert(path, Arc::new(Vec::from(data))); Ok(prev_value.is_some()) } @@ -205,9 +206,9 @@ impl Directory for RAMDirectory { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> { let path_buf = PathBuf::from(path); let mut vec_writer = VecWriter::new(path_buf.clone(), self.fs.clone()); - try!(self.fs.write(path_buf, &Vec::new())); - try!(vec_writer.write_all(data)); - try!(vec_writer.flush()); + self.fs.write(path_buf, &Vec::new())?; + vec_writer.write_all(data)?; + vec_writer.flush()?; Ok(()) } diff --git a/src/directory/shared_vec_slice.rs b/src/directory/shared_vec_slice.rs index 289d74721..52bb5aa86 100644 --- a/src/directory/shared_vec_slice.rs +++ b/src/directory/shared_vec_slice.rs @@ -16,7 +16,7 @@ impl SharedVecSlice { pub fn new(data: Arc>) -> SharedVecSlice { let data_len = data.len(); SharedVecSlice { - data: data, + data, start: 0, len: data_len, } diff --git a/src/fastfield/delete.rs b/src/fastfield/delete.rs index d5ab7cbc9..92a974620 100644 --- a/src/fastfield/delete.rs +++ b/src/fastfield/delete.rs @@ -47,7 +47,7 @@ impl DeleteBitSet { .map(|b| b.count_ones() as usize) .sum(); DeleteBitSet { - data: data, + data, len: num_deleted, } } diff --git a/src/fastfield/mod.rs b/src/fastfield/mod.rs index 0dc7f9875..175970eb2 100644 --- a/src/fastfield/mod.rs +++ b/src/fastfield/mod.rs @@ -9,14 +9,14 @@ It is designed for the fast random 
access of some document fields given a document id. `FastField` are useful when a field is required for all or most of -the `DocSet` : for instance for scoring, grouping, filtering, or facetting. +the `DocSet` : for instance for scoring, grouping, filtering, or faceting. Fields have to be declared as `FAST` in the schema. Currently only 64-bits integers (signed or unsigned) are supported. -They are stored in a bitpacked fashion so that their +They are stored in a bit-packed fashion so that their memory usage is directly linear with the amplitude of the values stored. diff --git a/src/fastfield/serializer.rs b/src/fastfield/serializer.rs index d26366de0..659f782a9 100644 --- a/src/fastfield/serializer.rs +++ b/src/fastfield/serializer.rs @@ -35,7 +35,7 @@ impl FastFieldSerializer { pub fn from_write(write: WritePtr) -> io::Result { // just making room for the pointer to header. let composite_write = CompositeWrite::wrap(write); - Ok(FastFieldSerializer { composite_write: composite_write }) + Ok(FastFieldSerializer { composite_write }) } /// Start serializing a new u64 fast field @@ -76,9 +76,9 @@ impl<'a, W: Write> FastSingleFieldSerializer<'a, W> { let num_bits = compute_num_bits(amplitude); let bit_packer = BitPacker::new(num_bits as usize); Ok(FastSingleFieldSerializer { - write: write, - bit_packer: bit_packer, - min_value: min_value, + write, + bit_packer, + min_value, }) } diff --git a/src/fastfield/writer.rs b/src/fastfield/writer.rs index 1750f90ca..20d9f1efa 100644 --- a/src/fastfield/writer.rs +++ b/src/fastfield/writer.rs @@ -43,7 +43,7 @@ impl FastFieldsWriter { } }) .collect(); - FastFieldsWriter { field_writers: field_writers } + FastFieldsWriter { field_writers } } /// Returns a `FastFieldsWriter` @@ -119,7 +119,7 @@ impl IntFastFieldWriter { /// Creates a new `IntFastFieldWriter` pub fn new(field: Field) -> IntFastFieldWriter { IntFastFieldWriter { - field: field, + field, vals: Vec::new(), val_count: 0, val_if_missing: 0u64, diff --git a/src/indexer/delete_queue.rs b/src/indexer/delete_queue.rs index da09c49c9..3a940616e 100644 --- a/src/indexer/delete_queue.rs +++ b/src/indexer/delete_queue.rs @@ -16,7 +16,7 @@ use std::ops::DerefMut; // - calling `delete_queue.cursor()` returns a cursor, that // will include all future delete operation (and no past operations). // - cloning an existing cursor returns a new cursor, that -// is at the exact same position, and can now advance independantly +// is at the exact same position, and can now advance independently // from the original cursor. 
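The fast-field module doc above ties memory usage to the amplitude of the stored values. Concretely, the serializer in this patch keeps `min_value` and packs `value - min_value` using `compute_num_bits(amplitude)` bits per entry, and the unpacker's mask is `(1 << num_bits) - 1`. The following is a self-contained sketch of that idea only, not the real `BitPacker` (which streams through a `u64` mini buffer and is careful about byte boundaries); it supports `num_bits <= 56`, which is enough for the illustration:

```rust
// Illustrative only: values are offset by the column minimum and packed at
// `num_bits` bits each, so storage grows with the amplitude (max - min),
// not with the absolute magnitude of the values.

fn compute_num_bits(amplitude: u64) -> u8 {
    (64 - amplitude.leading_zeros()) as u8
}

fn pack(vals: &[u64]) -> (u64, u8, Vec<u8>) {
    let min = vals.iter().cloned().min().unwrap_or(0);
    let max = vals.iter().cloned().max().unwrap_or(0);
    let num_bits = compute_num_bits(max - min).max(1);
    let (mut out, mut mini_buffer, mut written) = (Vec::new(), 0u64, 0u32);
    for &v in vals {
        mini_buffer |= (v - min) << written;
        written += u32::from(num_bits);
        while written >= 8 {
            out.push(mini_buffer as u8);
            mini_buffer >>= 8;
            written -= 8;
        }
    }
    if written > 0 {
        out.push(mini_buffer as u8);
    }
    (min, num_bits, out)
}

fn unpack(idx: usize, min: u64, num_bits: u8, data: &[u8]) -> u64 {
    let mask = (1u64 << num_bits) - 1; // same mask as in the BitUnpacker hunk
    let bit_pos = idx * num_bits as usize;
    let mut word = [0u8; 8];
    for (dst, src) in word.iter_mut().zip(&data[bit_pos / 8..]) {
        *dst = *src;
    }
    min + ((u64::from_le_bytes(word) >> (bit_pos % 8)) & mask)
}

fn main() {
    let vals = [1_000_003u64, 1_000_000, 1_000_017];
    let (min, num_bits, data) = pack(&vals);
    assert_eq!(num_bits, 5); // amplitude is 17 -> 5 bits per value
    assert_eq!(data.len(), 2); // 3 values in 2 bytes instead of 24
    assert_eq!(unpack(2, min, num_bits, &data), 1_000_017);
}
```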
#[derive(Default)] struct InnerDeleteQueue { diff --git a/src/indexer/directory_lock.rs b/src/indexer/directory_lock.rs index ce63b7643..76930289e 100644 --- a/src/indexer/directory_lock.rs +++ b/src/indexer/directory_lock.rs @@ -15,8 +15,8 @@ pub struct DirectoryLock { impl DirectoryLock { pub fn lock(mut directory: Box) -> Result { - try!(directory.open_write(&*LOCKFILE_FILEPATH)); - Ok(DirectoryLock { directory: directory }) + directory.open_write(&*LOCKFILE_FILEPATH)?; + Ok(DirectoryLock { directory }) } } diff --git a/src/indexer/index_writer.rs b/src/indexer/index_writer.rs index 22f39cac6..93f522fb9 100644 --- a/src/indexer/index_writer.rs +++ b/src/indexer/index_writer.rs @@ -133,21 +133,21 @@ pub fn open_index_writer( let mut index_writer = IndexWriter { _directory_lock: directory_lock, - heap_size_in_bytes_per_thread: heap_size_in_bytes_per_thread, + heap_size_in_bytes_per_thread, index: index.clone(), - document_receiver: document_receiver, - document_sender: document_sender, + document_receiver, + document_sender, - segment_updater: segment_updater, + segment_updater, workers_join_handle: vec![], - num_threads: num_threads, + num_threads, - delete_queue: delete_queue, + delete_queue, committed_opstamp: current_opstamp, - stamper: stamper, + stamper, generation: 0, @@ -272,7 +272,7 @@ fn index_documents( let segment_id = segment.id(); let mut segment_writer = SegmentWriter::for_segment(heap, table_size, segment.clone(), schema)?; for doc in document_iterator { - try!(segment_writer.add_document(&doc, schema)); + segment_writer.add_document(&doc, schema)?; // There is two possible conditions to close the segment. // One is the memory arena dedicated to the segment is // getting full. @@ -451,7 +451,7 @@ impl IndexWriter { fn start_workers(&mut self) -> Result<()> { for _ in 0..self.num_threads { - try!(self.add_indexing_worker()); + self.add_indexing_worker()?; } Ok(()) } @@ -584,8 +584,8 @@ impl IndexWriter { pub fn delete_term(&mut self, term: Term) -> u64 { let opstamp = self.stamper.stamp(); let delete_operation = DeleteOperation { - opstamp: opstamp, - term: term, + opstamp, + term, }; self.delete_queue.push(delete_operation); opstamp @@ -615,8 +615,8 @@ impl IndexWriter { pub fn add_document(&mut self, document: Document) -> u64 { let opstamp = self.stamper.stamp(); let add_operation = AddOperation { - opstamp: opstamp, - document: document, + opstamp, + document, }; self.document_sender.send(add_operation); opstamp diff --git a/src/indexer/segment_entry.rs b/src/indexer/segment_entry.rs index 9e8ad74a5..e256c5e47 100644 --- a/src/indexer/segment_entry.rs +++ b/src/indexer/segment_entry.rs @@ -52,8 +52,8 @@ impl SegmentEntry { SegmentEntry { meta: segment_meta, state: SegmentState::Ready, - delete_bitset: delete_bitset, - delete_cursor: delete_cursor, + delete_bitset, + delete_cursor, } } diff --git a/src/indexer/segment_register.rs b/src/indexer/segment_register.rs index 97be73c85..422795f68 100644 --- a/src/indexer/segment_register.rs +++ b/src/indexer/segment_register.rs @@ -22,16 +22,16 @@ pub struct SegmentRegister { impl Debug for SegmentRegister { fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { - try!(write!(f, "SegmentRegister(")); + write!(f, "SegmentRegister(")?; for (k, v) in &self.segment_states { - try!(write!( + write!( f, "{}:{}, ", k.short_uuid_string(), v.state().letter_code() - )); + )?; } - try!(write!(f, ")")); + write!(f, ")")?; Ok(()) } } @@ -118,7 +118,7 @@ impl SegmentRegister { let segment_entry = SegmentEntry::new(segment_meta, 
delete_cursor.clone(), None); segment_states.insert(segment_id, segment_entry); } - SegmentRegister { segment_states: segment_states } + SegmentRegister { segment_states } } } diff --git a/src/indexer/segment_serializer.rs b/src/indexer/segment_serializer.rs index c2aa4bcae..dd836b835 100644 --- a/src/indexer/segment_serializer.rs +++ b/src/indexer/segment_serializer.rs @@ -18,20 +18,20 @@ pub struct SegmentSerializer { impl SegmentSerializer { /// Creates a new `SegmentSerializer`. pub fn for_segment(segment: &mut Segment) -> Result { - let store_write = try!(segment.open_write(SegmentComponent::STORE)); + let store_write = segment.open_write(SegmentComponent::STORE)?; - let fast_field_write = try!(segment.open_write(SegmentComponent::FASTFIELDS)); - let fast_field_serializer = try!(FastFieldSerializer::from_write(fast_field_write)); + let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?; + let fast_field_serializer = FastFieldSerializer::from_write(fast_field_write)?; - let fieldnorms_write = try!(segment.open_write(SegmentComponent::FIELDNORMS)); - let fieldnorms_serializer = try!(FastFieldSerializer::from_write(fieldnorms_write)); + let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?; + let fieldnorms_serializer = FastFieldSerializer::from_write(fieldnorms_write)?; - let postings_serializer = try!(InvertedIndexSerializer::open(segment)); + let postings_serializer = InvertedIndexSerializer::open(segment)?; Ok(SegmentSerializer { - postings_serializer: postings_serializer, + postings_serializer, store_writer: StoreWriter::new(store_write), - fast_field_serializer: fast_field_serializer, - fieldnorms_serializer: fieldnorms_serializer, + fast_field_serializer, + fieldnorms_serializer, }) } diff --git a/src/indexer/segment_updater.rs b/src/indexer/segment_updater.rs index db7add226..ab48d7ed8 100644 --- a/src/indexer/segment_updater.rs +++ b/src/indexer/segment_updater.rs @@ -69,12 +69,12 @@ pub fn save_metas( ) -> Result<()> { let metas = IndexMeta { segments: segment_metas, - schema: schema, - opstamp: opstamp, + schema, + opstamp, }; - let mut w = try!(serde_json::to_vec_pretty(&metas)); - try!(write!(&mut w, "\n")); - directory.atomic_write(&META_FILEPATH, &w[..])?; + let mut buffer = serde_json::to_vec_pretty(&metas)?; + write!(&mut buffer, "\n")?; + directory.atomic_write(&META_FILEPATH, &buffer[..])?; debug!("Saved metas {:?}", serde_json::to_string_pretty(&metas)); Ok(()) } @@ -177,14 +177,14 @@ impl SegmentUpdater { let segment_manager = SegmentManager::from_segments(segments, delete_cursor); Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater { pool: CpuPool::new(1), - index: index, - segment_manager: segment_manager, + index, + segment_manager, merge_policy: RwLock::new(box DefaultMergePolicy::default()), merging_thread_id: AtomicUsize::default(), merging_threads: RwLock::new(HashMap::new()), generation: AtomicUsize::default(), killed: AtomicBool::new(false), - stamper: stamper, + stamper, }))) } diff --git a/src/indexer/segment_writer.rs b/src/indexer/segment_writer.rs index 09258c88c..5a029a5b0 100644 --- a/src/indexer/segment_writer.rs +++ b/src/indexer/segment_writer.rs @@ -82,14 +82,14 @@ impl<'a> SegmentWriter<'a> { }) .collect(); Ok(SegmentWriter { - heap: heap, + heap, max_doc: 0, - multifield_postings: multifield_postings, + multifield_postings, fieldnorms_writer: create_fieldnorms_writer(schema), - segment_serializer: segment_serializer, + segment_serializer, fast_field_writers: FastFieldsWriter::from_schema(schema), doc_opstamps: 
Vec::with_capacity(1_000), - tokenizers: tokenizers, + tokenizers, }) } diff --git a/src/postings/intersection.rs b/src/postings/intersection.rs index 06bc0b94e..041912fe2 100644 --- a/src/postings/intersection.rs +++ b/src/postings/intersection.rs @@ -14,7 +14,7 @@ impl From> for IntersectionDocSet { assert!(docsets.len() >= 2); docsets.sort_by_key(|docset| docset.size_hint()); IntersectionDocSet { - docsets: docsets, + docsets, finished: false, doc: 0u32, } diff --git a/src/postings/postings_writer.rs b/src/postings/postings_writer.rs index 70d797e1c..2ebe65519 100644 --- a/src/postings/postings_writer.rs +++ b/src/postings/postings_writer.rs @@ -62,9 +62,9 @@ impl<'a> MultiFieldPostingsWriter<'a> { .collect(); MultiFieldPostingsWriter { - heap: heap, - term_index: term_index, - per_field_postings_writers: per_field_postings_writers, + heap, + term_index, + per_field_postings_writers, } } @@ -185,7 +185,7 @@ impl<'a, Rec: Recorder + 'static> SpecializedPostingsWriter<'a, Rec> { /// constructor pub fn new(heap: &'a Heap) -> SpecializedPostingsWriter<'a, Rec> { SpecializedPostingsWriter { - heap: heap, + heap, _recorder_type: PhantomData, } } diff --git a/src/postings/segment_postings.rs b/src/postings/segment_postings.rs index cdf2451dd..e1dcccf45 100644 --- a/src/postings/segment_postings.rs +++ b/src/postings/segment_postings.rs @@ -27,7 +27,7 @@ impl PositionComputer { PositionComputer { position_to_skip: None, positions: vec![], - positions_stream: positions_stream, + positions_stream, } } @@ -299,7 +299,7 @@ pub struct BlockSegmentPostings { doc_freq: usize, doc_offset: DocId, - num_binpacked_blocks: usize, + num_bitpacked_blocks: usize, num_vint_docs: usize, remaining_data: SourceRead, } @@ -310,20 +310,17 @@ impl BlockSegmentPostings { data: SourceRead, has_freq: bool, ) -> BlockSegmentPostings { - let num_binpacked_blocks: usize = (doc_freq as usize) / COMPRESSION_BLOCK_SIZE; - let num_vint_docs = (doc_freq as usize) - COMPRESSION_BLOCK_SIZE * num_binpacked_blocks; + let num_bitpacked_blocks: usize = (doc_freq as usize) / COMPRESSION_BLOCK_SIZE; + let num_vint_docs = (doc_freq as usize) - COMPRESSION_BLOCK_SIZE * num_bitpacked_blocks; BlockSegmentPostings { - num_binpacked_blocks: num_binpacked_blocks, - num_vint_docs: num_vint_docs, - + num_bitpacked_blocks, + num_vint_docs, doc_decoder: BlockDecoder::new(), freq_decoder: BlockDecoder::with_val(1), - - has_freq: has_freq, - + has_freq, remaining_data: data, doc_offset: 0, - doc_freq: doc_freq, + doc_freq, } } @@ -340,7 +337,7 @@ impl BlockSegmentPostings { pub(crate) fn reset(&mut self, doc_freq: usize, postings_data: SourceRead) { let num_binpacked_blocks: usize = doc_freq / COMPRESSION_BLOCK_SIZE; let num_vint_docs = doc_freq & (COMPRESSION_BLOCK_SIZE - 1); - self.num_binpacked_blocks = num_binpacked_blocks; + self.num_bitpacked_blocks = num_binpacked_blocks; self.num_vint_docs = num_vint_docs; self.remaining_data = postings_data; self.doc_offset = 0; @@ -396,7 +393,7 @@ impl BlockSegmentPostings { /// /// Returns false iff there was no remaining blocks. pub fn advance(&mut self) -> bool { - if self.num_binpacked_blocks > 0 { + if self.num_bitpacked_blocks > 0 { let num_consumed_bytes = self.doc_decoder.uncompress_block_sorted( self.remaining_data.as_ref(), self.doc_offset, @@ -411,7 +408,7 @@ impl BlockSegmentPostings { } // it will be used as the next offset. 
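The `num_binpacked_blocks` → `num_bitpacked_blocks` rename below touches the arithmetic that splits a posting list into full bit-packed blocks plus a VInt-encoded remainder. `new()` computes the remainder by subtraction while `reset()` masks with `COMPRESSION_BLOCK_SIZE - 1`; the two agree because the block size is a power of two (128 in tantivy). A quick self-contained check of that equivalence:

```rust
const COMPRESSION_BLOCK_SIZE: usize = 128; // power of two, as in tantivy

fn split(doc_freq: usize) -> (usize, usize) {
    let num_bitpacked_blocks = doc_freq / COMPRESSION_BLOCK_SIZE;
    // Equivalent to doc_freq - COMPRESSION_BLOCK_SIZE * num_bitpacked_blocks
    // because the block size is a power of two.
    let num_vint_docs = doc_freq & (COMPRESSION_BLOCK_SIZE - 1);
    (num_bitpacked_blocks, num_vint_docs)
}

fn main() {
    for &doc_freq in &[0usize, 1, 127, 128, 129, 1000] {
        let (blocks, rest) = split(doc_freq);
        assert_eq!(blocks * COMPRESSION_BLOCK_SIZE + rest, doc_freq);
    }
    assert_eq!(split(1000), (7, 104)); // 7 full blocks of 128 docs + 104 VInt docs
}
```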
self.doc_offset = self.doc_decoder.output(COMPRESSION_BLOCK_SIZE - 1); - self.num_binpacked_blocks -= 1; + self.num_bitpacked_blocks -= 1; true } else if self.num_vint_docs > 0 { let num_compressed_bytes = self.doc_decoder.uncompress_vint_sorted( @@ -436,7 +433,7 @@ impl BlockSegmentPostings { /// Returns an empty segment postings object pub fn empty() -> BlockSegmentPostings { BlockSegmentPostings { - num_binpacked_blocks: 0, + num_bitpacked_blocks: 0, num_vint_docs: 0, doc_decoder: BlockDecoder::new(), @@ -554,7 +551,7 @@ mod tests { ); } assert!(block_segments.advance()); - assert!(block_segments.docs() == &[0, 2, 4]); + assert_eq!(block_segments.docs(), &[0, 2, 4]); { let term = Term::from_field_u64(int_field, 1u64); let inverted_index = segment_reader.inverted_index(int_field); @@ -562,6 +559,6 @@ mod tests { inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments); } assert!(block_segments.advance()); - assert!(block_segments.docs() == &[1, 3, 5]); + assert_eq!(block_segments.docs(), &[1, 3, 5]); } } diff --git a/src/postings/serializer.rs b/src/postings/serializer.rs index a1a257ca3..b9a42d809 100644 --- a/src/postings/serializer.rs +++ b/src/postings/serializer.rs @@ -64,10 +64,10 @@ impl InvertedIndexSerializer { schema: Schema, ) -> Result { Ok(InvertedIndexSerializer { - terms_write: terms_write, - postings_write: postings_write, - positions_write: positions_write, - schema: schema, + terms_write, + postings_write, + positions_write, + schema, }) } @@ -153,9 +153,9 @@ impl<'a> FieldSerializer<'a> { }; Ok(FieldSerializer { - term_dictionary_builder: term_dictionary_builder, - postings_serializer: postings_serializer, - positions_serializer_opt: positions_serializer_opt, + term_dictionary_builder, + postings_serializer, + positions_serializer_opt, current_term_info: TermInfo::default(), term_open: false, }) @@ -262,7 +262,7 @@ impl PostingsSerializer { term_freqs: vec![], last_doc_id_encoded: 0u32, - termfreq_enabled: termfreq_enabled, + termfreq_enabled, } } diff --git a/src/postings/term_info.rs b/src/postings/term_info.rs index 375f73202..519641283 100644 --- a/src/postings/term_info.rs +++ b/src/postings/term_info.rs @@ -39,10 +39,10 @@ impl BinarySerializable for TermInfo { let positions_offset = u32::deserialize(reader)?; let positions_inner_offset = u8::deserialize(reader)?; Ok(TermInfo { - doc_freq: doc_freq, - postings_offset: postings_offset, - positions_offset: positions_offset, - positions_inner_offset: positions_inner_offset, + doc_freq, + postings_offset, + positions_offset, + positions_inner_offset, }) } } diff --git a/src/postings/vec_postings.rs b/src/postings/vec_postings.rs index 8c9512fb1..b9419dd94 100644 --- a/src/postings/vec_postings.rs +++ b/src/postings/vec_postings.rs @@ -19,7 +19,7 @@ pub struct VecPostings { impl From> for VecPostings { fn from(doc_ids: Vec) -> VecPostings { VecPostings { - doc_ids: doc_ids, + doc_ids, cursor: Wrapping(usize::max_value()), } } diff --git a/src/query/boolean_query/boolean_query.rs b/src/query/boolean_query/boolean_query.rs index 520cb18ed..af4418d4e 100644 --- a/src/query/boolean_query/boolean_query.rs +++ b/src/query/boolean_query/boolean_query.rs @@ -27,7 +27,7 @@ pub struct BooleanQuery { impl From)>> for BooleanQuery { fn from(subqueries: Vec<(Occur, Box)>) -> BooleanQuery { - BooleanQuery { subqueries: subqueries } + BooleanQuery { subqueries } } } @@ -37,12 +37,10 @@ impl Query for BooleanQuery { } fn weight(&self, searcher: &Searcher) -> Result> { - let sub_weights = try!( - 
self.subqueries - .iter() - .map(|&(ref _occur, ref subquery)| subquery.weight(searcher)) - .collect() - ); + let sub_weights = self.subqueries + .iter() + .map(|&(ref _occur, ref subquery)| subquery.weight(searcher)) + .collect::>()?; let occurs: Vec = self.subqueries .iter() .map(|&(ref occur, ref _subquery)| *occur) diff --git a/src/query/boolean_query/boolean_scorer.rs b/src/query/boolean_query/boolean_scorer.rs index 723e4a92d..9c84d04da 100644 --- a/src/query/boolean_query/boolean_scorer.rs +++ b/src/query/boolean_query/boolean_scorer.rs @@ -56,7 +56,7 @@ impl BooleanScorer { .enumerate() .map(|(ord, doc)| { HeapItem { - doc: doc, + doc, ord: ord as u32, } }) @@ -65,8 +65,8 @@ impl BooleanScorer { scorers: non_empty_scorers, queue: BinaryHeap::from(heap_items), doc: 0u32, - score_combiner: score_combiner, - occur_filter: occur_filter, + score_combiner, + occur_filter, } } diff --git a/src/query/boolean_query/boolean_weight.rs b/src/query/boolean_query/boolean_weight.rs index 0ff49cbde..6be88a23d 100644 --- a/src/query/boolean_query/boolean_weight.rs +++ b/src/query/boolean_query/boolean_weight.rs @@ -13,8 +13,8 @@ pub struct BooleanWeight { impl BooleanWeight { pub fn new(weights: Vec>, occur_filter: OccurFilter) -> BooleanWeight { BooleanWeight { - weights: weights, - occur_filter: occur_filter, + weights, + occur_filter, } } } @@ -22,12 +22,10 @@ impl BooleanWeight { impl Weight for BooleanWeight { fn scorer<'a>(&'a self, reader: &'a SegmentReader) -> Result> { - let sub_scorers: Vec> = try!( - self.weights - .iter() - .map(|weight| weight.scorer(reader)) - .collect() - ); + let sub_scorers: Vec> = self.weights + .iter() + .map(|weight| weight.scorer(reader)) + .collect::>()?; let boolean_scorer = BooleanScorer::new(sub_scorers, self.occur_filter); Ok(box boolean_scorer) } diff --git a/src/query/boolean_query/score_combiner.rs b/src/query/boolean_query/score_combiner.rs index c3d3e648b..f6d9c8944 100644 --- a/src/query/boolean_query/score_combiner.rs +++ b/src/query/boolean_query/score_combiner.rs @@ -37,7 +37,7 @@ impl ScoreCombiner { impl From> for ScoreCombiner { fn from(coords: Vec) -> ScoreCombiner { ScoreCombiner { - coords: coords, + coords, num_fields: 0, score: 0f32, } diff --git a/src/query/occur_filter.rs b/src/query/occur_filter.rs index ca80e0c1f..55303e6c0 100644 --- a/src/query/occur_filter.rs +++ b/src/query/occur_filter.rs @@ -36,8 +36,8 @@ impl OccurFilter { } } OccurFilter { - and_mask: and_mask, - result: result, + and_mask, + result, } } } diff --git a/src/query/phrase_query/phrase_query.rs b/src/query/phrase_query/phrase_query.rs index 3c9e4b597..fca742191 100644 --- a/src/query/phrase_query/phrase_query.rs +++ b/src/query/phrase_query/phrase_query.rs @@ -45,6 +45,6 @@ impl Query for PhraseQuery { impl From> for PhraseQuery { fn from(phrase_terms: Vec) -> PhraseQuery { assert!(phrase_terms.len() > 1); - PhraseQuery { phrase_terms: phrase_terms } + PhraseQuery { phrase_terms } } } diff --git a/src/query/phrase_query/phrase_weight.rs b/src/query/phrase_query/phrase_weight.rs index 50287c14b..3135526e1 100644 --- a/src/query/phrase_query/phrase_weight.rs +++ b/src/query/phrase_query/phrase_weight.rs @@ -14,7 +14,7 @@ pub struct PhraseWeight { impl From> for PhraseWeight { fn from(phrase_terms: Vec) -> PhraseWeight { - PhraseWeight { phrase_terms: phrase_terms } + PhraseWeight { phrase_terms } } } diff --git a/src/query/query.rs b/src/query/query.rs index c531cc266..c7a438388 100644 --- a/src/query/query.rs +++ b/src/query/query.rs @@ -62,19 +62,19 @@ pub trait 
Query: fmt::Debug { /// fn search(&self, searcher: &Searcher, collector: &mut Collector) -> Result { let mut timer_tree = TimerTree::default(); - let weight = try!(self.weight(searcher)); + let weight = self.weight(searcher)?; { let mut search_timer = timer_tree.open("search"); for (segment_ord, segment_reader) in searcher.segment_readers().iter().enumerate() { let mut segment_search_timer = search_timer.open("segment_search"); { let _ = segment_search_timer.open("set_segment"); - try!(collector.set_segment( + collector.set_segment( segment_ord as SegmentLocalId, segment_reader, - )); + )?; } - let mut scorer = try!(weight.scorer(segment_reader)); + let mut scorer = weight.scorer(segment_reader)?; { let _collection_timer = segment_search_timer.open("collection"); scorer.collect(collector); diff --git a/src/query/query_parser/logical_ast.rs b/src/query/query_parser/logical_ast.rs index 16b93eace..2624a782f 100644 --- a/src/query/query_parser/logical_ast.rs +++ b/src/query/query_parser/logical_ast.rs @@ -27,14 +27,14 @@ impl fmt::Debug for LogicalAST { match *self { LogicalAST::Clause(ref clause) => { if clause.is_empty() { - try!(write!(formatter, "")); + write!(formatter, "")?; } else { let (ref occur, ref subquery) = clause[0]; - try!(write!(formatter, "({}{:?}", occur_letter(*occur), subquery)); + write!(formatter, "({}{:?}", occur_letter(*occur), subquery)?; for &(ref occur, ref subquery) in &clause[1..] { - try!(write!(formatter, " {}{:?}", occur_letter(*occur), subquery)); + write!(formatter, " {}{:?}", occur_letter(*occur), subquery)?; } - try!(formatter.write_str(")")); + formatter.write_str(")")?; } Ok(()) } diff --git a/src/query/query_parser/query_grammar.rs b/src/query/query_parser/query_grammar.rs index 8fa2a3c11..fd23ce1c8 100644 --- a/src/query/query_parser/query_grammar.rs +++ b/src/query/query_parser/query_grammar.rs @@ -26,13 +26,13 @@ where let term_query = (field, char(':'), term_val_with_field).map(|(field_name, _, phrase)| { UserInputLiteral { field_name: Some(field_name), - phrase: phrase, + phrase, } }); let term_default_field = term_val().map(|phrase| { UserInputLiteral { field_name: None, - phrase: phrase, + phrase, } }); try(term_query) diff --git a/src/query/query_parser/query_parser.rs b/src/query/query_parser/query_parser.rs index 77baa0055..b939d2d3f 100644 --- a/src/query/query_parser/query_parser.rs +++ b/src/query/query_parser/query_parser.rs @@ -93,7 +93,7 @@ impl QueryParser { QueryParser { schema, default_fields, - tokenizer_manager: tokenizer_manager, + tokenizer_manager, conjunction_by_default: false, } } diff --git a/src/query/query_parser/stemmer.rs b/src/query/query_parser/stemmer.rs index a1818950f..75d30aec9 100644 --- a/src/query/query_parser/stemmer.rs +++ b/src/query/query_parser/stemmer.rs @@ -37,8 +37,8 @@ impl StemmerTokenStream fn wrap(stemmer: Arc, tail: TailTokenStream) -> StemmerTokenStream { StemmerTokenStream { - tail: tail, - stemmer: stemmer, + tail, + stemmer, } } } \ No newline at end of file diff --git a/src/query/query_parser/user_input_ast.rs b/src/query/query_parser/user_input_ast.rs index 26c05f628..1123666c7 100644 --- a/src/query/query_parser/user_input_ast.rs +++ b/src/query/query_parser/user_input_ast.rs @@ -33,11 +33,11 @@ impl fmt::Debug for UserInputAST { UserInputAST::Must(ref subquery) => write!(formatter, "+({:?})", subquery), UserInputAST::Clause(ref subqueries) => { if subqueries.is_empty() { - try!(write!(formatter, "")); + write!(formatter, "")?; } else { - try!(write!(formatter, "{:?}", &subqueries[0])); + 
write!(formatter, "{:?}", &subqueries[0])?; for subquery in &subqueries[1..] { - try!(write!(formatter, " {:?}", subquery)); + write!(formatter, " {:?}", subquery)?; } } Ok(()) diff --git a/src/query/term_query/term_query.rs b/src/query/term_query/term_query.rs index 7ab36c112..3bc4dd93a 100644 --- a/src/query/term_query/term_query.rs +++ b/src/query/term_query/term_query.rs @@ -26,7 +26,7 @@ impl TermQuery { /// Creates a new term query. pub fn new(term: Term, segment_postings_options: IndexRecordOption) -> TermQuery { TermQuery { - term: term, + term, index_record_option: segment_postings_options, } } diff --git a/src/query/term_query/term_weight.rs b/src/query/term_query/term_weight.rs index 42f9cfe23..b19cdc656 100644 --- a/src/query/term_query/term_weight.rs +++ b/src/query/term_query/term_weight.rs @@ -40,7 +40,7 @@ impl TermWeight { if let Some(segment_postings) = postings_opt { Ok(TermScorer { idf: self.idf(), - fieldnorm_reader_opt: fieldnorm_reader_opt, + fieldnorm_reader_opt, postings: segment_postings, }) } else { diff --git a/src/schema/field_entry.rs b/src/schema/field_entry.rs index 4bb0204a9..09a3fd2b2 100644 --- a/src/schema/field_entry.rs +++ b/src/schema/field_entry.rs @@ -195,8 +195,8 @@ impl<'de> Deserialize<'de> for FieldEntry { )?; Ok(FieldEntry { - name: name, - field_type: field_type, + name, + field_type, }) } } diff --git a/src/schema/field_value.rs b/src/schema/field_value.rs index 5b7359fd1..150f586c8 100644 --- a/src/schema/field_value.rs +++ b/src/schema/field_value.rs @@ -17,8 +17,8 @@ impl FieldValue { /// Constructor pub fn new(field: Field, value: Value) -> FieldValue { FieldValue { - field: field, - value: value, + field, + value, } } diff --git a/src/schema/value.rs b/src/schema/value.rs index 828822a8e..8b257e667 100644 --- a/src/schema/value.rs +++ b/src/schema/value.rs @@ -149,18 +149,18 @@ mod binary_serialize { } } fn deserialize(reader: &mut R) -> io::Result { - let type_code = try!(u8::deserialize(reader)); + let type_code = u8::deserialize(reader)?; match type_code { TEXT_CODE => { - let text = try!(String::deserialize(reader)); + let text = String::deserialize(reader)?; Ok(Value::Str(text)) } U64_CODE => { - let value = try!(u64::deserialize(reader)); + let value = u64::deserialize(reader)?; Ok(Value::U64(value)) } I64_CODE => { - let value = try!(i64::deserialize(reader)); + let value = i64::deserialize(reader)?; Ok(Value::I64(value)) } _ => { diff --git a/src/store/writer.rs b/src/store/writer.rs index 2b7aacb19..78503640d 100644 --- a/src/store/writer.rs +++ b/src/store/writer.rs @@ -49,11 +49,11 @@ impl StoreWriter { /// pub fn store<'a>(&mut self, field_values: &[&'a FieldValue]) -> io::Result<()> { self.intermediary_buffer.clear(); - try!((field_values.len() as u32).serialize( - &mut self.intermediary_buffer, - )); - for field_value in field_values { - try!((*field_value).serialize(&mut self.intermediary_buffer)); + (field_values.len() as u32) + .serialize(&mut self.intermediary_buffer)?; + for &field_value in field_values { + field_value + .serialize(&mut self.intermediary_buffer)?; } (self.intermediary_buffer.len() as u32).serialize( &mut self.current_block, @@ -69,12 +69,11 @@ impl StoreWriter { fn write_and_compress_block(&mut self) -> io::Result<()> { self.intermediary_buffer.clear(); { - let mut encoder = try!(lz4::EncoderBuilder::new().build( - &mut self.intermediary_buffer, - )); - try!(encoder.write_all(&self.current_block)); + let mut encoder = lz4::EncoderBuilder::new() + .build(&mut self.intermediary_buffer)?; + 
encoder.write_all(&self.current_block)?; let (_, encoder_result) = encoder.finish(); - try!(encoder_result); + encoder_result?; } (self.intermediary_buffer.len() as u32).serialize( &mut self.writer, @@ -96,12 +95,12 @@ impl StoreWriter { /// and serializes the skip list index on disc. pub fn close(mut self) -> io::Result<()> { if !self.current_block.is_empty() { - try!(self.write_and_compress_block()); + self.write_and_compress_block()?; } let header_offset: u64 = self.writer.written_bytes() as u64; - try!(self.offset_index_writer.write(&mut self.writer)); - try!(header_offset.serialize(&mut self.writer)); - try!(self.doc.serialize(&mut self.writer)); + self.offset_index_writer.write(&mut self.writer)?; + header_offset.serialize(&mut self.writer)?; + self.doc.serialize(&mut self.writer)?; self.writer.flush() } } diff --git a/src/termdict/fstdict/streamer.rs b/src/termdict/fstdict/streamer.rs index 1d90fe9c1..cb99034ec 100644 --- a/src/termdict/fstdict/streamer.rs +++ b/src/termdict/fstdict/streamer.rs @@ -13,8 +13,8 @@ pub struct TermStreamerBuilderImpl<'a> { impl<'a> TermStreamerBuilderImpl<'a> { pub(crate) fn new(fst_map: &'a TermDictionaryImpl, stream_builder: StreamBuilder<'a>) -> Self { TermStreamerBuilderImpl { - fst_map: fst_map, - stream_builder: stream_builder, + fst_map, + stream_builder, } } } diff --git a/src/termdict/fstdict/termdict.rs b/src/termdict/fstdict/termdict.rs index ce608113b..a41088a96 100644 --- a/src/termdict/fstdict/termdict.rs +++ b/src/termdict/fstdict/termdict.rs @@ -52,7 +52,7 @@ where fn new(w: W, _field_type: FieldType) -> io::Result { let fst_builder = fst::MapBuilder::new(w).map_err(convert_fst_error)?; Ok(TermDictionaryBuilderImpl { - fst_builder: fst_builder, + fst_builder, data: Vec::new(), }) } @@ -121,7 +121,7 @@ impl<'a> TermDictionary<'a> for TermDictionaryImpl { let values_source = source.slice(split_len, length_offset); let fst_index = open_fst_index(fst_source); TermDictionaryImpl { - fst_index: fst_index, + fst_index, values_mmap: values_source, } } diff --git a/src/termdict/merger.rs b/src/termdict/merger.rs index 517f9589a..d37542821 100644 --- a/src/termdict/merger.rs +++ b/src/termdict/merger.rs @@ -53,7 +53,7 @@ impl<'a> TermMerger<'a> { .enumerate() .map(|(ord, streamer)| { HeapItem { - streamer: streamer, + streamer, segment_ord: ord, } }) diff --git a/src/termdict/streamdict/delta_encoder.rs b/src/termdict/streamdict/delta_encoder.rs index 5ba466203..da119460c 100644 --- a/src/termdict/streamdict/delta_encoder.rs +++ b/src/termdict/streamdict/delta_encoder.rs @@ -88,7 +88,7 @@ impl TermInfoDeltaEncoder { pub fn new(has_positions: bool) -> Self { TermInfoDeltaEncoder { term_info: TermInfo::default(), - has_positions: has_positions, + has_positions, } } @@ -129,8 +129,8 @@ pub fn make_mask(num_bytes: usize) -> u32 { impl TermInfoDeltaDecoder { pub fn from_term_info(term_info: TermInfo, has_positions: bool) -> TermInfoDeltaDecoder { TermInfoDeltaDecoder { - term_info: term_info, - has_positions: has_positions, + term_info, + has_positions, } } @@ -142,7 +142,7 @@ impl TermInfoDeltaDecoder { positions_offset: checkpoint.positions_offset, positions_inner_offset: 0u8, }, - has_positions: has_positions, + has_positions, } } diff --git a/src/termdict/streamdict/mod.rs b/src/termdict/streamdict/mod.rs index 176f63377..0f6e152a1 100644 --- a/src/termdict/streamdict/mod.rs +++ b/src/termdict/streamdict/mod.rs @@ -34,9 +34,9 @@ impl BinarySerializable for CheckPoint { let postings_offset = u32::deserialize(reader)?; let positions_offset = 
u32::deserialize(reader)?; Ok(CheckPoint { - stream_offset: stream_offset, - postings_offset: postings_offset, - positions_offset: positions_offset, + stream_offset, + postings_offset, + positions_offset, }) } } diff --git a/src/termdict/streamdict/streamer.rs b/src/termdict/streamdict/streamer.rs index 22f687da1..649de0e1c 100644 --- a/src/termdict/streamdict/streamer.rs +++ b/src/termdict/streamdict/streamer.rs @@ -107,8 +107,8 @@ impl<'a> TermStreamerBuilder for TermStreamerBuilderImpl<'a> { TermInfoDeltaDecoder::from_term_info(self.term_info, self.has_positions); TermStreamerImpl { cursor: &data[start..stop], - term_delta_decoder: term_delta_decoder, - term_info_decoder: term_info_decoder, + term_delta_decoder, + term_info_decoder, } } } @@ -146,13 +146,13 @@ impl<'a> TermStreamerBuilderImpl<'a> { let data = term_dictionary.stream_data(); let origin = data.as_ptr() as usize; TermStreamerBuilderImpl { - term_dictionary: term_dictionary, + term_dictionary, term_info: TermInfo::default(), - origin: origin, + origin, offset_from: 0, offset_to: data.len(), current_key: Vec::with_capacity(300), - has_positions: has_positions, + has_positions, } } } diff --git a/src/termdict/streamdict/termdict.rs b/src/termdict/streamdict/termdict.rs index f0f7c618f..5376d92a5 100644 --- a/src/termdict/streamdict/termdict.rs +++ b/src/termdict/streamdict/termdict.rs @@ -66,9 +66,9 @@ where let postings_offset = term_info.postings_offset as u32; let positions_offset = term_info.positions_offset as u32; let checkpoint = CheckPoint { - stream_offset: stream_offset, - postings_offset: postings_offset, - positions_offset: positions_offset, + stream_offset, + postings_offset, + positions_offset, }; self.block_index .insert( @@ -330,10 +330,10 @@ impl<'a> TermDictionary<'a> for TermDictionaryImpl { let fst_index = open_fst_index(fst_data).expect("Index FST data corrupted"); TermDictionaryImpl { - has_positions: has_positions, - stream_data: stream_data, - checkpoints_data: checkpoints_data, - fst_index: fst_index, + has_positions, + stream_data, + checkpoints_data, + fst_index, } } diff --git a/src/tokenizer/japanese_tokenizer.rs b/src/tokenizer/japanese_tokenizer.rs index c9981b201..3dfb3ad5c 100644 --- a/src/tokenizer/japanese_tokenizer.rs +++ b/src/tokenizer/japanese_tokenizer.rs @@ -30,15 +30,15 @@ impl<'a> Tokenizer<'a> for JapaneseTokenizer { offset_to = offset_from + term.len(); if term.chars().all(char::is_alphanumeric) { tokens.push(Token { - offset_from: offset_from, - offset_to: offset_to, + offset_from, + offset_to, position: pos, text: term, }); } } JapaneseTokenizerStream { - tokens: tokens, + tokens, cursor: Cursor::HasNotStarted, } } diff --git a/src/tokenizer/lower_caser.rs b/src/tokenizer/lower_caser.rs index b7357ee07..a15d34b99 100644 --- a/src/tokenizer/lower_caser.rs +++ b/src/tokenizer/lower_caser.rs @@ -48,6 +48,6 @@ where TailTokenStream: TokenStream, { fn wrap(tail: TailTokenStream) -> LowerCaserTokenStream { - LowerCaserTokenStream { tail: tail } + LowerCaserTokenStream { tail } } } diff --git a/src/tokenizer/raw_tokenizer.rs b/src/tokenizer/raw_tokenizer.rs index fe363386a..039ac6a01 100644 --- a/src/tokenizer/raw_tokenizer.rs +++ b/src/tokenizer/raw_tokenizer.rs @@ -21,7 +21,7 @@ impl<'a> Tokenizer<'a> for RawTokenizer { text: text.to_string(), }; RawTokenStream { - token: token, + token, has_token: true, } } diff --git a/src/tokenizer/remove_long.rs b/src/tokenizer/remove_long.rs index 5637906f4..94d6b6c58 100644 --- a/src/tokenizer/remove_long.rs +++ b/src/tokenizer/remove_long.rs @@ 
-14,7 +14,7 @@ pub struct RemoveLongFilter { impl RemoveLongFilter { // the limit is in bytes of the UTF-8 representation. pub fn limit(length_limit: usize) -> RemoveLongFilter { - RemoveLongFilter { length_limit: length_limit } + RemoveLongFilter { length_limit } } } @@ -31,8 +31,8 @@ where tail: TailTokenStream, ) -> RemoveLongFilterStream { RemoveLongFilterStream { - token_length_limit: token_length_limit, - tail: tail, + token_length_limit, + tail, } } } diff --git a/src/tokenizer/simple_tokenizer.rs b/src/tokenizer/simple_tokenizer.rs index e9d93deee..8850c5f45 100644 --- a/src/tokenizer/simple_tokenizer.rs +++ b/src/tokenizer/simple_tokenizer.rs @@ -18,7 +18,7 @@ impl<'a> Tokenizer<'a> for SimpleTokenizer { fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl { SimpleTokenStream { - text: text, + text, chars: text.char_indices(), token: Token::default(), } diff --git a/src/tokenizer/stemmer.rs b/src/tokenizer/stemmer.rs index 1c349e049..9a8e7d173 100644 --- a/src/tokenizer/stemmer.rs +++ b/src/tokenizer/stemmer.rs @@ -67,8 +67,8 @@ where tail: TailTokenStream, ) -> StemmerTokenStream { StemmerTokenStream { - tail: tail, - stemmer: stemmer, + tail, + stemmer, } } } diff --git a/src/tokenizer/token_stream_chain.rs b/src/tokenizer/token_stream_chain.rs index eaeccd420..48159365f 100644 --- a/src/tokenizer/token_stream_chain.rs +++ b/src/tokenizer/token_stream_chain.rs @@ -18,9 +18,9 @@ where token_streams: Vec, ) -> TokenStreamChain { TokenStreamChain { - offsets: offsets, + offsets, stream_idx: 0, - token_streams: token_streams, + token_streams, position_shift: 0, token: Token::default(), } diff --git a/src/tokenizer/tokenizer_manager.rs b/src/tokenizer/tokenizer_manager.rs index 24f611a89..54c28f107 100644 --- a/src/tokenizer/tokenizer_manager.rs +++ b/src/tokenizer/tokenizer_manager.rs @@ -17,9 +17,13 @@ use tokenizer::Stemmer; /// /// By default, it is populated with the following managers. /// -/// * raw : does not process nor tokenize the text. -/// * default : Chops the text on according to whitespace and -/// punctuation, removes tokens that are too long, lowercases +/// * `raw` : does not process nor tokenize the text. +/// * `default` : Chops the text on according to whitespace and +/// punctuation, removes tokens that are too long, and lowercases +/// tokens +/// * `en_stem` : Like `default`, but also applies stemming on the +/// resulting tokens. Stemming can improve the recall of your +/// search engine. #[derive(Clone)] pub struct TokenizerManager { tokenizers: Arc>>>,
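The updated `TokenizerManager` doc lists the built-in analyzers: `raw`, `default` (split on whitespace/punctuation, drop overly long tokens, lowercase) and `en_stem` (the same plus stemming). The toy sketch below only mimics those steps on a plain `&str`; it does not use tantivy's tokenizer API (which chains `SimpleTokenizer`, `RemoveLongFilter`, `LowerCaser` and `Stemmer` as token filters), and the single hard-coded suffix rule merely stands in for a real Snowball stemmer:

```rust
/// Toy equivalent of the `default` chain: split on non-alphanumeric chars,
/// drop tokens longer than `length_limit`, lowercase. Not tantivy's API.
fn default_like(text: &str, length_limit: usize) -> Vec<String> {
    text.split(|c: char| !c.is_alphanumeric())
        .filter(|tok| !tok.is_empty() && tok.len() <= length_limit)
        .map(str::to_lowercase)
        .collect()
}

/// Toy equivalent of `en_stem`: the same chain plus a "stemmer", here reduced
/// to one hard-coded suffix rule instead of a real Snowball stemmer.
fn en_stem_like(text: &str, length_limit: usize) -> Vec<String> {
    default_like(text, length_limit)
        .into_iter()
        .map(|tok| {
            if tok.ends_with("ing") {
                tok[..tok.len() - 3].to_string()
            } else {
                tok
            }
        })
        .collect()
}

fn main() {
    let text = "Searching, Tokenizing and Pneumonoultramicroscopicsilicovolcanoconiosis";
    assert_eq!(default_like(text, 20), vec!["searching", "tokenizing", "and"]);
    assert_eq!(en_stem_like(text, 20), vec!["search", "tokeniz", "and"]);
}
```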