diff --git a/src/tokenizer/ascii_folding_filter.rs b/src/tokenizer/ascii_folding_filter.rs
index 89ba04bc9..3e800971b 100644
--- a/src/tokenizer/ascii_folding_filter.rs
+++ b/src/tokenizer/ascii_folding_filter.rs
@@ -1527,7 +1527,7 @@ fn fold_non_ascii_char(c: char) -> Option<&'static str> {
 }
 
 // https://github.com/apache/lucene-solr/blob/master/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.java#L187
-fn to_ascii(text: &mut str, output: &mut String) {
+fn to_ascii(text: &str, output: &mut String) {
     output.clear();
 
     for c in text.chars() {
diff --git a/src/tokenizer/lower_caser.rs b/src/tokenizer/lower_caser.rs
index af2389301..2136b0063 100644
--- a/src/tokenizer/lower_caser.rs
+++ b/src/tokenizer/lower_caser.rs
@@ -22,7 +22,7 @@ pub struct LowerCaserTokenStream<'a> {
 }
 
 // writes a lowercased version of text into output.
-fn to_lowercase_unicode(text: &mut str, output: &mut String) {
+fn to_lowercase_unicode(text: &str, output: &mut String) {
     output.clear();
     for c in text.chars() {
         // Contrary to the std, we do not take care of sigma special case.
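
Both hunks only relax the parameter type from `&mut str` to `&str`: the helpers iterate over `text.chars()` and never mutate the input, so an exclusive borrow is unnecessary and makes call sites needlessly restrictive. Below is a minimal sketch of the pattern, assuming a body along the lines of `to_lowercase_unicode`; everything beyond the signature and the lines visible in the diff context is an assumption, not the crate's exact code.

// Sketch of the buffer-reuse pattern behind these helpers (assumed body;
// only the signature, output.clear() and the loop header appear in the diff).
fn to_lowercase_unicode(text: &str, output: &mut String) {
    output.clear();
    for c in text.chars() {
        // As noted in the diff's context line, the sigma special case is ignored.
        output.extend(c.to_lowercase());
    }
}

fn main() {
    let mut buf = String::new();
    // A shared borrow is enough now; the input token is never modified.
    to_lowercase_unicode("Straße", &mut buf);
    assert_eq!(buf, "straße");
    // The same output buffer is reused across tokens to avoid reallocations.
    to_lowercase_unicode("TOKENIZER", &mut buf);
    assert_eq!(buf, "tokenizer");
}

Passing `&str` also lets callers hand in string literals or borrowed token text directly, whereas `&mut str` forced them to own mutable string data they never actually needed to mutate.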