diff --git a/src/aggregation/bucket/range.rs b/src/aggregation/bucket/range.rs
index 08fd6e5cf..465022171 100644
--- a/src/aggregation/bucket/range.rs
+++ b/src/aggregation/bucket/range.rs
@@ -240,17 +240,15 @@ impl SegmentRangeCollector {
 /// more computational expensive when many documents are hit.
 fn to_u64_range(range: &RangeAggregationRange, field_type: &Type) -> crate::Result<Range<u64>> {
     let start = if let Some(from) = range.from {
-        f64_to_fastfield_u64(from, field_type).ok_or::<TantivyError>(
-            TantivyError::InvalidArgument("invalid field type".to_string()),
-        )?
+        f64_to_fastfield_u64(from, field_type)
+            .ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))?
     } else {
         u64::MIN
     };
     let end = if let Some(to) = range.to {
-        f64_to_fastfield_u64(to, field_type).ok_or::<TantivyError>(
-            TantivyError::InvalidArgument("invalid field type".to_string()),
-        )?
+        f64_to_fastfield_u64(to, field_type)
+            .ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))?
     } else {
         u64::MAX
     };
diff --git a/src/aggregation/collector.rs b/src/aggregation/collector.rs
index 11f898423..ec581d6a7 100644
--- a/src/aggregation/collector.rs
+++ b/src/aggregation/collector.rs
@@ -124,7 +124,7 @@ impl AggregationSegmentCollector {
         agg: &Aggregations,
         reader: &SegmentReader,
     ) -> crate::Result<Self> {
-        let aggs_with_accessor = get_aggs_with_accessor_and_validate(&agg, reader)?;
+        let aggs_with_accessor = get_aggs_with_accessor_and_validate(agg, reader)?;
         let result =
             SegmentAggregationResultsCollector::from_req_and_validate(&aggs_with_accessor)?;
         Ok(AggregationSegmentCollector {
diff --git a/src/aggregation/metric/stats.rs b/src/aggregation/metric/stats.rs
index 02003e415..b596cd7a5 100644
--- a/src/aggregation/metric/stats.rs
+++ b/src/aggregation/metric/stats.rs
@@ -87,11 +87,8 @@ impl IntermediateStats {
     }
 
     pub(crate) fn standard_deviation(&self) -> Option<f64> {
-        if let Some(average) = self.avg() {
-            Some((self.square_mean() - average * average).sqrt())
-        } else {
-            None
-        }
+        self.avg()
+            .map(|average| (self.square_mean() - average * average).sqrt())
     }
 
     /// Merge data from other stats into this instance.
diff --git a/src/aggregation/mod.rs b/src/aggregation/mod.rs
index 26ff8b090..d4f77fa94 100644
--- a/src/aggregation/mod.rs
+++ b/src/aggregation/mod.rs
@@ -918,8 +918,8 @@ mod tests {
         let collector = AggregationCollector::from_aggs(agg_req_1);
 
         let searcher = reader.searcher();
-        let agg_res = searcher.search(&AllQuery, &collector).unwrap_err();
-        agg_res
+
+        searcher.search(&AllQuery, &collector).unwrap_err()
     };
 
     let agg_res = avg_on_field("text");
diff --git a/src/query/boolean_query/block_wand.rs b/src/query/boolean_query/block_wand.rs
index ed97e8cfc..92bb2e8b1 100644
--- a/src/query/boolean_query/block_wand.rs
+++ b/src/query/boolean_query/block_wand.rs
@@ -47,7 +47,7 @@ fn find_pivot_doc(
 /// scorer in scorers[..pivot_len] and `scorer.doc()` for scorer in scorers[pivot_len..].
 /// Note: before and after calling this method, scorers need to be sorted by their `.doc()`.
 fn block_max_was_too_low_advance_one_scorer(
-    scorers: &mut Vec<TermScorerWithMaxScore>,
+    scorers: &mut [TermScorerWithMaxScore],
     pivot_len: usize,
 ) {
     debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
@@ -82,7 +82,7 @@ fn block_max_was_too_low_advance_one_scorer(
 // Given a list of term_scorers and a `ord` and assuming that `term_scorers[ord]` is sorted
 // except term_scorers[ord] that might be in advance compared to its ranks,
 // bubble up term_scorers[ord] in order to restore the ordering.
-fn restore_ordering(term_scorers: &mut Vec<TermScorerWithMaxScore>, ord: usize) {
+fn restore_ordering(term_scorers: &mut [TermScorerWithMaxScore], ord: usize) {
     let doc = term_scorers[ord].doc();
     for i in ord + 1..term_scorers.len() {
         if term_scorers[i].doc() >= doc {
diff --git a/src/tokenizer/ascii_folding_filter.rs b/src/tokenizer/ascii_folding_filter.rs
index 542519e12..3e800971b 100644
--- a/src/tokenizer/ascii_folding_filter.rs
+++ b/src/tokenizer/ascii_folding_filter.rs
@@ -1527,7 +1527,7 @@ fn fold_non_ascii_char(c: char) -> Option<&'static str> {
 }
 
 // https://github.com/apache/lucene-solr/blob/master/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.java#L187
-fn to_ascii(text: &mut String, output: &mut String) {
+fn to_ascii(text: &str, output: &mut String) {
     output.clear();
 
     for c in text.chars() {
diff --git a/src/tokenizer/lower_caser.rs b/src/tokenizer/lower_caser.rs
index 3373948e3..2136b0063 100644
--- a/src/tokenizer/lower_caser.rs
+++ b/src/tokenizer/lower_caser.rs
@@ -22,7 +22,7 @@ pub struct LowerCaserTokenStream<'a> {
 }
 
 // writes a lowercased version of text into output.
-fn to_lowercase_unicode(text: &mut String, output: &mut String) {
+fn to_lowercase_unicode(text: &str, output: &mut String) {
     output.clear();
     for c in text.chars() {
         // Contrary to the std, we do not take care of sigma special case.
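
For context, the diff above is a set of routine clippy-style cleanups rather than behavior changes. The standalone Rust sketch below (hypothetical names throughout; none of this is tantivy code) illustrates the three idioms it applies repository-wide.

// A minimal sketch of the three refactoring idioms used in the diff.
//
// 1. `ok_or_else` constructs the error value lazily, only when the `Option`
//    is actually `None`; `ok_or` builds its argument eagerly on every call
//    (the range.rs change).
fn parse_port(input: Option<&str>) -> Result<u16, String> {
    input
        .ok_or_else(|| "missing port".to_string())?
        .parse::<u16>()
        .map_err(|e| e.to_string())
}

// 2. `Option::map` replaces the manual
//    `if let Some(x) { Some(f(x)) } else { None }` pattern (the stats.rs change).
fn half(value: Option<f64>) -> Option<f64> {
    value.map(|v| v / 2.0)
}

// 3. Parameters are loosened to the weakest type that still works:
//    `&mut [T]` instead of `&mut Vec<T>` when elements are mutated but the
//    collection is never resized (block_wand.rs), and `&str` instead of
//    `&mut String` when the text is only read (the tokenizer changes).
fn double_all(values: &mut [i64]) {
    for v in values.iter_mut() {
        *v *= 2;
    }
}

fn count_vowels(text: &str) -> usize {
    text.chars().filter(|c| "aeiou".contains(*c)).count()
}

fn main() {
    assert_eq!(parse_port(Some("8080")), Ok(8080));
    assert_eq!(parse_port(None), Err("missing port".to_string()));
    assert_eq!(half(Some(3.0)), Some(1.5));
    let mut v = vec![1, 2, 3];
    double_all(&mut v); // &mut Vec<i64> coerces to &mut [i64]
    assert_eq!(v, vec![2, 4, 6]);
    assert_eq!(count_vowels("tantivy"), 2); // 'a' and 'i'
}

Besides letting callers pass slices, arrays, and string literals, the slice and `&str` parameter forms also silence clippy's `ptr_arg` lint, which is the likely motivation for those hunks.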