Merge pull request #1297 from PSeitz/fix_clippy

fix clippy issues
Authored by PSeitz on 2022-03-02 10:11:56 +01:00; committed by GitHub.
7 changed files with 13 additions and 18 deletions


@@ -240,17 +240,15 @@ impl SegmentRangeCollector {
 /// more computationally expensive when many documents are hit.
 fn to_u64_range(range: &RangeAggregationRange, field_type: &Type) -> crate::Result<Range<u64>> {
     let start = if let Some(from) = range.from {
-        f64_to_fastfield_u64(from, field_type).ok_or::<TantivyError>(
-            TantivyError::InvalidArgument("invalid field type".to_string()),
-        )?
+        f64_to_fastfield_u64(from, field_type)
+            .ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))?
     } else {
         u64::MIN
     };
     let end = if let Some(to) = range.to {
-        f64_to_fastfield_u64(to, field_type).ok_or::<TantivyError>(
-            TantivyError::InvalidArgument("invalid field type".to_string()),
-        )?
+        f64_to_fastfield_u64(to, field_type)
+            .ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))?
     } else {
         u64::MAX
     };
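
This hunk addresses clippy's `or_fun_call` lint: the argument of `ok_or` is evaluated eagerly, so the error `String` was allocated even when the `Option` was `Some`, whereas `ok_or_else` takes a closure that only runs in the `None` case. A minimal standalone sketch of the same rewrite (the function and values here are illustrative stand-ins, not tantivy code):

```rust
// Illustrative only: `parse_positive` is a stand-in, not a tantivy function.
fn parse_positive(opt: Option<f64>) -> Result<f64, String> {
    // `ok_or("...".to_string())` would allocate the error String on every
    // call; `ok_or_else` defers that allocation to the `None` branch.
    opt.filter(|v| *v > 0.0)
        .ok_or_else(|| "invalid field type".to_string())
}

fn main() {
    assert_eq!(parse_positive(Some(2.0)), Ok(2.0));
    assert!(parse_positive(None).is_err());
}
```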


@@ -124,7 +124,7 @@ impl AggregationSegmentCollector {
         agg: &Aggregations,
         reader: &SegmentReader,
     ) -> crate::Result<Self> {
-        let aggs_with_accessor = get_aggs_with_accessor_and_validate(&agg, reader)?;
+        let aggs_with_accessor = get_aggs_with_accessor_and_validate(agg, reader)?;
         let result =
             SegmentAggregationResultsCollector::from_req_and_validate(&aggs_with_accessor)?;
         Ok(AggregationSegmentCollector {
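
Here `agg` already has type `&Aggregations`, so writing `&agg` passed a `&&Aggregations` that the compiler had to auto-dereference; clippy reports this as `needless_borrow`. A compilable sketch of the pattern (all names are illustrative):

```rust
// Illustrative stand-ins; `Aggregations` here is just an empty struct.
struct Aggregations;

fn validate(_agg: &Aggregations) -> Result<(), String> {
    Ok(())
}

fn collect(agg: &Aggregations) -> Result<(), String> {
    // `agg` is already a reference; `validate(&agg)` would pass a
    // `&&Aggregations` and trip clippy::needless_borrow.
    validate(agg)
}

fn main() {
    assert!(collect(&Aggregations).is_ok());
}
```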


@@ -87,11 +87,8 @@ impl IntermediateStats {
     }
     pub(crate) fn standard_deviation(&self) -> Option<f64> {
-        if let Some(average) = self.avg() {
-            Some((self.square_mean() - average * average).sqrt())
-        } else {
-            None
-        }
+        self.avg()
+            .map(|average| (self.square_mean() - average * average).sqrt())
     }
     /// Merge data from other stats into this instance.
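
The `if let Some(..) { Some(..) } else { None }` shape is exactly what `Option::map` expresses, and clippy flags it as `manual_map`. A self-contained sketch of the same identity, with a simplified stats struct standing in for `IntermediateStats`:

```rust
// Simplified stand-in for IntermediateStats; only enough state for the demo.
struct Stats {
    sum: f64,
    sum_of_squares: f64,
    count: u64,
}

impl Stats {
    fn avg(&self) -> Option<f64> {
        (self.count > 0).then(|| self.sum / self.count as f64)
    }

    fn square_mean(&self) -> f64 {
        self.sum_of_squares / self.count as f64
    }

    // Equivalent to the `if let Some(..) { Some(..) } else { None }`
    // original, but expressed directly with `Option::map`.
    fn standard_deviation(&self) -> Option<f64> {
        self.avg()
            .map(|average| (self.square_mean() - average * average).sqrt())
    }
}

fn main() {
    let stats = Stats { sum: 6.0, sum_of_squares: 14.0, count: 3 };
    // mean = 2, square_mean = 14/3, variance = 14/3 - 4 = 2/3
    let expected = (2.0f64 / 3.0).sqrt();
    assert!((stats.standard_deviation().unwrap() - expected).abs() < 1e-12);
}
```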


@@ -918,8 +918,8 @@ mod tests {
             let collector = AggregationCollector::from_aggs(agg_req_1);
             let searcher = reader.searcher();
-            let agg_res = searcher.search(&AllQuery, &collector).unwrap_err();
-            agg_res
+            searcher.search(&AllQuery, &collector).unwrap_err()
        };
        let agg_res = avg_on_field("text");
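
Binding a value only to return it on the next line is clippy's `let_and_return`; the fix returns the expression directly. A tiny sketch (the helper is invented for illustration):

```rust
// Illustrative helper, not tantivy code.
fn first_token(text: &str) -> Option<&str> {
    // clippy::let_and_return: `let token = ...; token` adds nothing,
    // so the expression is returned directly.
    text.split_whitespace().next()
}

fn main() {
    assert_eq!(first_token("fix clippy issues"), Some("fix"));
}
```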


@@ -47,7 +47,7 @@ fn find_pivot_doc(
 /// scorer in scorers[..pivot_len] and `scorer.doc()` for scorer in scorers[pivot_len..].
 /// Note: before and after calling this method, scorers need to be sorted by their `.doc()`.
 fn block_max_was_too_low_advance_one_scorer(
-    scorers: &mut Vec<TermScorerWithMaxScore>,
+    scorers: &mut [TermScorerWithMaxScore],
     pivot_len: usize,
 ) {
     debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
@@ -82,7 +82,7 @@ fn block_max_was_too_low_advance_one_scorer(
 // Given a list of term_scorers and an `ord`, and assuming that `term_scorers` is sorted
 // except for `term_scorers[ord]`, which may have advanced past its rank,
 // bubble up `term_scorers[ord]` in order to restore the ordering.
-fn restore_ordering(term_scorers: &mut Vec<TermScorerWithMaxScore>, ord: usize) {
+fn restore_ordering(term_scorers: &mut [TermScorerWithMaxScore], ord: usize) {
     let doc = term_scorers[ord].doc();
     for i in ord + 1..term_scorers.len() {
         if term_scorers[i].doc() >= doc {
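
Both functions only index and iterate the scorers, never grow or shrink the container, so `&mut [TermScorerWithMaxScore]` is the more general parameter type; this is the rationale behind clippy's `ptr_arg` family of lints, and callers keep working because `&mut Vec<T>` coerces to `&mut [T]`. A minimal sketch of the same signature change on plain integers:

```rust
// Sketch of the `&mut Vec<T>` -> `&mut [T]` fix on plain integers.
// In-place mutation still works, and the function now also accepts arrays.
fn double_all(values: &mut [i64]) {
    for v in values.iter_mut() {
        *v *= 2;
    }
}

fn main() {
    let mut v = vec![1, 2, 3];
    double_all(&mut v); // `&mut Vec<i64>` coerces to `&mut [i64]`
    assert_eq!(v, [2, 4, 6]);

    let mut a = [10, 20];
    double_all(&mut a); // arrays qualify too, which `&mut Vec` would forbid
    assert_eq!(a, [20, 40]);
}
```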


@@ -1527,7 +1527,7 @@ fn fold_non_ascii_char(c: char) -> Option<&'static str> {
 }
 // https://github.com/apache/lucene-solr/blob/master/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.java#L187
-fn to_ascii(text: &mut String, output: &mut String) {
+fn to_ascii(text: &str, output: &mut String) {
     output.clear();
     for c in text.chars() {
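
`to_ascii` only reads `text` via `text.chars()`, so `&str` suffices and is strictly more flexible than `&mut String` (a `&String` derefs to `&str`, and string literals now work directly); this is likely the same `ptr_arg`-style lint as above. A rough sketch of the read-only shape, with a trivial filter standing in for the real Lucene-derived folding table:

```rust
// `keep_ascii` is a toy stand-in for tantivy's `to_ascii` folding logic.
fn keep_ascii(text: &str, output: &mut String) {
    output.clear();
    for c in text.chars() {
        // The real filter maps non-ASCII chars to ASCII equivalents;
        // here we simply drop them to keep the sketch short.
        if c.is_ascii() {
            output.push(c);
        }
    }
}

fn main() {
    let mut out = String::new();
    keep_ascii("héllo", &mut out); // a &str literal now works directly
    assert_eq!(out, "hllo");
}
```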


@@ -22,7 +22,7 @@ pub struct LowerCaserTokenStream<'a> {
 }
 // writes a lowercased version of text into output.
-fn to_lowercase_unicode(text: &mut String, output: &mut String) {
+fn to_lowercase_unicode(text: &str, output: &mut String) {
     output.clear();
     for c in text.chars() {
         // Contrary to the std, we do not take care of the sigma special case.
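
The same signature fix applies here: the lowercaser only needs to read the text, so taking `&str` avoids demanding mutable access it never uses. A toy version of the fixed shape that, unlike tantivy's (which skips the sigma special case), simply defers to `char::to_lowercase`:

```rust
// Toy lowercaser with the fixed `&str` signature; tantivy's real version
// does its own special-casing, while this one defers to the std.
fn to_lowercase_simple(text: &str, output: &mut String) {
    output.clear();
    for c in text.chars() {
        // `char::to_lowercase` yields an iterator, since one char may
        // lowercase to several.
        output.extend(c.to_lowercase());
    }
}

fn main() {
    let mut out = String::new();
    to_lowercase_simple("ÉLAN", &mut out);
    assert_eq!(out, "élan");
}
```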