Mirror of https://github.com/quickwit-oss/tantivy.git
Synced 2026-01-06 09:12:55 +00:00
Remove unused imports.
@@ -20,8 +20,7 @@ use crate::reader::IndexReaderBuilder;
 use crate::schema::Field;
 use crate::schema::FieldType;
 use crate::schema::Schema;
-use crate::tokenizer::Tokenizer;
-use crate::tokenizer::{TextAnalyzer, TextAnalyzerT, TokenizerManager};
+use crate::tokenizer::{TextAnalyzerT, TokenizerManager};
 use crate::IndexWriter;
 use std::collections::HashSet;
 use std::fmt;
@@ -10,10 +10,9 @@ use crate::schema::FieldType;
 use crate::schema::Schema;
 use crate::schema::Term;
 use crate::schema::Value;
 use crate::schema::{Field, FieldEntry};
 use crate::tokenizer::PreTokenizedStream;
-use crate::tokenizer::{DynTokenStreamChain, TokenStreamChain, Tokenizer};
-use crate::tokenizer::{FacetTokenizer, TextAnalyzer, TextAnalyzerT, Token};
+use crate::tokenizer::{DynTokenStreamChain, Tokenizer};
+use crate::tokenizer::{FacetTokenizer, TextAnalyzerT, Token};
 use crate::Opstamp;
 use crate::{DocId, SegmentComponent};
@@ -3,7 +3,6 @@
 #![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))]
 #![doc(test(attr(allow(unused_variables), deny(warnings))))]
 #![warn(missing_docs)]
-#![allow(unused_imports)]
 
 //! # `tantivy`
 //!
@@ -1,7 +1,7 @@
 use crate::query::Query;
 use crate::schema::Field;
 use crate::schema::Value;
-use crate::tokenizer::{TextAnalyzerT, Token, Tokenizer};
+use crate::tokenizer::{TextAnalyzerT, Token};
 use crate::Searcher;
 use crate::{Document, Score};
 use htmlescape::encode_minimal;
@@ -1,4 +1,4 @@
-use super::{analyzer_builder, Token, TokenFilter};
+use super::{Token, TokenFilter};
 use std::mem;
 
 /// This class converts alphabetic, numeric, and symbolic Unicode characters
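As an aside on the filter touched above: the ASCII-folding filter described by that doc comment slots into an analyzer chain like any other TokenFilter. The following is a minimal sketch against upstream tantivy's published TextAnalyzer / SimpleTokenizer / LowerCaser / AsciiFoldingFilter API from this era; the analyzer_builder / TextAnalyzerT refactor on this branch may expose the same chain differently.

use tantivy::tokenizer::{AsciiFoldingFilter, LowerCaser, SimpleTokenizer, TextAnalyzer, TokenStream};

fn main() {
    // Chain: split on whitespace/punctuation, lowercase, then fold
    // accented characters down to their ASCII equivalents.
    let analyzer = TextAnalyzer::from(SimpleTokenizer)
        .filter(LowerCaser)
        .filter(AsciiFoldingFilter);

    let mut stream = analyzer.token_stream("Déjà Vu");
    while stream.advance() {
        // Prints "deja" then "vu".
        println!("{}", stream.token().text);
    }
}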
@@ -1,4 +1,4 @@
-use super::{analyzer_builder, TextAnalyzerT, Token, TokenFilter};
+use super::{Token, TokenFilter};
 use std::mem;
 
 impl TokenFilter for LowerCaser {
@@ -1,5 +1,4 @@
 use super::{Token, Tokenizer};
 use std::str::CharIndices;
 
 /// Tokenize the text by splitting on whitespaces and punctuation.
 #[derive(Clone, Debug)]
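To illustrate the doc comment above: SimpleTokenizer emits one token per run of alphanumeric characters and discards the whitespace and punctuation between them. A hedged sketch against the upstream Tokenizer / TokenStream traits of this era (the trait names differ after this branch's refactor):

use tantivy::tokenizer::{SimpleTokenizer, TokenStream, Tokenizer};

fn main() {
    // "Hello, world!" yields the tokens "Hello" and "world", each with
    // its byte offsets and stream position recorded on the Token.
    let mut stream = SimpleTokenizer.token_stream("Hello, world!");
    while stream.advance() {
        let token = stream.token();
        println!("{:?} bytes {}..{} position {}",
                 token.text, token.offset_from, token.offset_to, token.position);
    }
}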
@@ -1,4 +1,4 @@
-use crate::tokenizer::{Token, Tokenizer};
+use crate::tokenizer::Token;
 
 const POSITION_GAP: usize = 2;
@@ -1,4 +1,4 @@
-use crate::tokenizer::{DynTokenStreamChain, TokenStreamChain};
+use crate::tokenizer::TokenStreamChain;
 use serde::{Deserialize, Serialize};
 /// The tokenizer module contains all of the tools used to process
 /// text in `tantivy`.
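Since the module doc above is the entry point for text processing, here is a quick sketch of how these pieces are normally wired together: an analyzer is registered with the index's TokenizerManager under a name, and a text field refers to that analyzer by name. This uses upstream tantivy's Schema / Index / TokenizerManager API as published around this commit; the TextAnalyzerT work on this branch may change the registration signature.

use tantivy::schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions};
use tantivy::tokenizer::{LowerCaser, SimpleTokenizer, TextAnalyzer};
use tantivy::Index;

fn main() {
    // A text field that asks for the analyzer registered as "lowercase_simple".
    let mut schema_builder = Schema::builder();
    let indexing = TextFieldIndexing::default()
        .set_tokenizer("lowercase_simple")
        .set_index_option(IndexRecordOption::WithFreqsAndPositions);
    schema_builder.add_text_field("body", TextOptions::default().set_indexing_options(indexing));
    let schema = schema_builder.build();

    // Register the analyzer under that name before indexing any documents.
    let index = Index::create_in_ram(schema);
    index.tokenizers().register(
        "lowercase_simple",
        TextAnalyzer::from(SimpleTokenizer).filter(LowerCaser),
    );
}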