Added handling of pre-tokenized text fields (#642).
examples/pre_tokenized_text.rs (new file, 126 lines)
@@ -0,0 +1,126 @@
// # Pre-tokenized text example
//
// This example shows how to use pre-tokenized text. Sometimes you might
// want to index and search through text which is already split into
// tokens by some external tool.
//
// In this example we will:
// - use a tantivy tokenizer to create tokens and load them directly into tantivy,
// - import tokenized text straight from JSON,
// - perform a search on documents with pre-tokenized text

use tantivy::tokenizer::{SimpleTokenizer, Token, TokenStream, TokenizedString, Tokenizer};

use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;

fn tokenize_it(text: &str) -> Vec<Token> {
    let mut ts = SimpleTokenizer.token_stream(text);
    let mut tokens = vec![];
    while ts.advance() {
        tokens.push(ts.token().clone());
    }
    tokens
}

fn main() -> tantivy::Result<()> {
    let index_path = TempDir::new()?;

    let mut schema_builder = Schema::builder();

    // We add the `TOKENIZED` flag to the `TextOptions` to mark the field as
    // pre-tokenized. In addition, the title will also be stored, so we can
    // see it in the returned results.
    schema_builder.add_text_field("title", TEXT | STORED | TOKENIZED);
    schema_builder.add_text_field("body", TEXT | TOKENIZED);

    let schema = schema_builder.build();

    let index = Index::create_in_dir(&index_path, schema.clone())?;

    let mut index_writer = index.writer(50_000_000)?;

    // We can create a document manually, by setting the fields
    // one by one in a Document object.
    let title = schema.get_field("title").unwrap();
    let body = schema.get_field("body").unwrap();

    let title_text = "The Old Man and the Sea";
    let body_text = "He was an old man who fished alone in a skiff in the Gulf Stream";

    // Content of our first document.
    // We create a `TokenizedString`, which contains the original text and a vector of tokens.
    let title_tok = TokenizedString {
        text: String::from(title_text),
        tokens: tokenize_it(title_text),
    };

    println!(
        "Original text: \"{}\" and tokens: {:?}",
        title_tok.text, title_tok.tokens
    );

    let body_tok = TokenizedString {
        text: String::from(body_text),
        tokens: tokenize_it(body_text),
    };

    // Now let's create a document and add our `TokenizedString`s using
    // the `add_tokenized_text` method of `Document`.
    let mut old_man_doc = Document::default();
    old_man_doc.add_tokenized_text(title, &title_tok);
    old_man_doc.add_tokenized_text(body, &body_tok);

    // ... now let's just add it to the IndexWriter.
    index_writer.add_document(old_man_doc);

    // A `Document` can also be obtained directly from JSON:
    let short_man_json = r#"{
        "title":[{
            "text":"The Old Man",
            "tokens":[
                {"offset_from":0,"offset_to":3,"position":0,"text":"The","position_length":1},
                {"offset_from":4,"offset_to":7,"position":1,"text":"Old","position_length":1},
                {"offset_from":8,"offset_to":11,"position":2,"text":"Man","position_length":1}
            ]
        }]
    }"#;

    let short_man_doc = schema.parse_document(&short_man_json)?;

    index_writer.add_document(short_man_doc);

    // Let's commit the changes.
    index_writer.commit()?;

    // ... and now it is time to query our index.

    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::OnCommit)
        .try_into()?;

    let searcher = reader.searcher();

    // We want the documents containing the token "Man"; we use a TermQuery for that.
    let query = TermQuery::new(
        Term::from_field_text(title, "Man"),
        IndexRecordOption::Basic,
    );

    let (top_docs, count) = searcher
        .search(&query, &(TopDocs::with_limit(2), Count))
        .unwrap();

    println!("Docs count: {}", count);

    for (_score, doc_address) in top_docs {
        let retrieved_doc = searcher.doc(doc_address)?;
        println!("Document: {}", schema.to_json(&retrieved_doc));
    }

    Ok(())
}
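The tokens fed to `add_tokenized_text` do not have to come from a tantivy tokenizer at all. The sketch below is not part of the patch; the `external_tokens` helper is made up for illustration, and it shows how the output of an external splitter could be wrapped into the `TokenizedString` the example uses, with `offset_from`/`offset_to` as byte offsets into the original text.

use tantivy::tokenizer::{Token, TokenizedString};

// Hypothetical helper: wrap externally produced tokens (here simulated by a
// plain whitespace split) into a TokenizedString suitable for add_tokenized_text.
fn external_tokens(text: &str) -> TokenizedString {
    let mut tokens = Vec::new();
    let mut search_from = 0;
    for (position, word) in text.split_whitespace().enumerate() {
        // locate this word in the original text, scanning left to right
        let offset_from = text[search_from..].find(word).unwrap() + search_from;
        let offset_to = offset_from + word.len();
        tokens.push(Token {
            offset_from,
            offset_to,
            position,
            text: word.to_string(),
            position_length: 1,
        });
        search_from = offset_to;
    }
    TokenizedString {
        text: text.to_string(),
        tokens,
    }
}

A `TokenizedString` built this way can then be added to a `Document` with `add_tokenized_text`, exactly like `title_tok` and `body_tok` above.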
@@ -285,6 +285,6 @@ mod tests {
            payload: None,
        };
        let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
-       assert_eq!(json, r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#);
+       assert_eq!(json, r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false,"tokenized":false}}],"opstamp":0}"#);
    }
}
@@ -14,6 +14,7 @@ use crate::schema::{Field, FieldEntry};
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::FacetTokenizer;
use crate::tokenizer::{TokenStream, Tokenizer};
+use crate::tokenizer::{TokenizedStream, TokenizedString};
use crate::DocId;
use crate::Opstamp;
use crate::Result;
@@ -157,26 +158,46 @@ impl SegmentWriter {
                        }
                    }
                }
-                FieldType::Str(_) => {
-                    let num_tokens = if let Some(ref mut tokenizer) =
-                        self.tokenizers[field.field_id() as usize]
-                    {
-                        let texts: Vec<&str> = field_values
+                FieldType::Str(ref text_options) => {
+                    let num_tokens = if text_options.is_tokenized() {
+                        let tok_strings: Vec<&TokenizedString> = field_values
                            .iter()
                            .flat_map(|field_value| match *field_value.value() {
-                                Value::Str(ref text) => Some(text.as_str()),
+                                Value::TokStr(ref tok_str) => Some(tok_str),
                                _ => None,
                            })
                            .collect();
-                        if texts.is_empty() {
+                        if tok_strings.is_empty() {
                            0
                        } else {
-                            let mut token_stream = tokenizer.token_stream_texts(&texts[..]);
+                            let mut token_stream =
+                                TokenizedStream::chain_tokenized_strings(&tok_strings[..]);
                            self.multifield_postings
                                .index_text(doc_id, field, &mut token_stream)
                        }
                    } else {
-                        0
+                        if let Some(ref mut tokenizer) = self.tokenizers[field.field_id() as usize]
+                        {
+                            let texts: Vec<&str> = field_values
+                                .iter()
+                                .flat_map(|field_value| match *field_value.value() {
+                                    Value::Str(ref text) => Some(text.as_str()),
+                                    _ => None,
+                                })
+                                .collect();
+                            if texts.is_empty() {
+                                0
+                            } else {
+                                let mut token_stream = tokenizer.token_stream_texts(&texts[..]);
+                                self.multifield_postings.index_text(
+                                    doc_id,
+                                    field,
+                                    &mut token_stream,
+                                )
+                            }
+                        } else {
+                            0
+                        }
                    };
                    self.fieldnorms_writer.record(doc_id, field, num_tokens);
                }
@@ -1,6 +1,7 @@
use super::*;
use crate::common::BinarySerializable;
use crate::common::VInt;
+use crate::tokenizer::TokenizedString;
use crate::DateTime;
use itertools::Itertools;
use std::io::{self, Read, Write};
@@ -78,6 +79,12 @@ impl Document {
        self.add(FieldValue::new(field, value));
    }

+    /// Add a text field with tokens.
+    pub fn add_tokenized_text(&mut self, field: Field, tokenized_text: &TokenizedString) {
+        let value = Value::TokStr(tokenized_text.clone());
+        self.add(FieldValue::new(field, value));
+    }
+
    /// Add a u64 field
    pub fn add_u64(&mut self, field: Field, value: u64) {
        self.add(FieldValue::new(field, Value::U64(value)));
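For comparison with the existing `add_text`, here is a short usage sketch of the new method (field names are illustrative only):

use tantivy::schema::{Document, Schema, STORED, TEXT, TOKENIZED};
use tantivy::tokenizer::{Token, TokenizedString};

fn main() {
    let mut schema_builder = Schema::builder();
    let plain = schema_builder.add_text_field("plain", TEXT | STORED);
    let pre_tok = schema_builder.add_text_field("pre_tok", TEXT | TOKENIZED);
    let _schema = schema_builder.build();

    let tok_value = TokenizedString {
        text: "Hi".to_string(),
        tokens: vec![Token {
            offset_from: 0,
            offset_to: 2,
            position: 0,
            text: "Hi".to_string(),
            position_length: 1,
        }],
    };

    let mut doc = Document::default();
    // plain text fields are still tokenized at indexing time
    doc.add_text(plain, "Hi");
    // pre-tokenized fields take a borrowed TokenizedString, which is
    // cloned into the document as Value::TokStr
    doc.add_tokenized_text(pre_tok, &tok_value);
}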
@@ -280,7 +280,8 @@ mod tests {
      "record": "position",
      "tokenizer": "default"
    },
-   "stored": false
+   "stored": false,
+   "tokenized": false
  }
}"#;
        let field_value_json = serde_json::to_string_pretty(&field_value).unwrap();
@@ -1,11 +1,11 @@
use base64::decode;

-use crate::schema::{IntOptions, TextOptions};
-
use crate::schema::Facet;
use crate::schema::IndexRecordOption;
use crate::schema::TextFieldIndexing;
use crate::schema::Value;
+use crate::schema::{IntOptions, TextOptions};
+use crate::tokenizer::TokenizedString;
use serde_json::Value as JsonValue;

/// Possible error that may occur while parsing a field value
@@ -169,6 +169,36 @@ impl FieldType {
                    Err(ValueParsingError::TypeError(msg))
                }
            },
+            JsonValue::Object(_) => match *self {
+                FieldType::Str(ref text_options) => {
+                    if text_options.is_tokenized() {
+                        if let Ok(tok_str_val) =
+                            serde_json::from_value::<TokenizedString>(json.clone())
+                        {
+                            Ok(Value::TokStr(tok_str_val))
+                        } else {
+                            let msg = format!(
+                                "Json value {:?} cannot be translated to TokenizedString.",
+                                json
+                            );
+                            Err(ValueParsingError::TypeError(msg))
+                        }
+                    } else {
+                        let msg = format!(
+                            "Json value not supported error {:?}. Expected {:?}",
+                            json, self
+                        );
+                        Err(ValueParsingError::TypeError(msg))
+                    }
+                }
+                _ => {
+                    let msg = format!(
+                        "Json value not supported error {:?}. Expected {:?}",
+                        json, self
+                    );
+                    Err(ValueParsingError::TypeError(msg))
+                }
+            },
            _ => {
                let msg = format!(
                    "Json value not supported error {:?}. Expected {:?}",
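The JSON object form is therefore only accepted for fields whose options report `is_tokenized()`; for any other field an object still yields a `TypeError`. A small sketch of that behaviour through the public `Schema::parse_document` API (field names are made up for illustration):

use tantivy::schema::{Schema, TEXT, TOKENIZED};

fn main() {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("pre_tok", TEXT | TOKENIZED);
    schema_builder.add_text_field("plain", TEXT);
    let schema = schema_builder.build();

    let token_obj = r#"[{"text":"Hi","tokens":[
        {"offset_from":0,"offset_to":2,"position":0,"text":"Hi","position_length":1}]}]"#;

    // accepted: the field is marked TOKENIZED, so the object parses as a TokenizedString
    let ok_json = format!(r#"{{"pre_tok":{}}}"#, token_obj);
    assert!(schema.parse_document(&ok_json).is_ok());

    // rejected: a plain TEXT field does not accept the object form
    let err_json = format!(r#"{{"plain":{}}}"#, token_obj);
    assert!(schema.parse_document(&err_json).is_err());
}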
@@ -141,6 +141,7 @@ pub use self::text_options::TextFieldIndexing;
pub use self::text_options::TextOptions;
pub use self::text_options::STRING;
pub use self::text_options::TEXT;
+pub use self::text_options::TOKENIZED;

pub use self::flags::{FAST, INDEXED, STORED};
pub use self::int_options::Cardinality;
@@ -443,7 +443,8 @@ mod tests {
        "record": "position",
        "tokenizer": "default"
      },
-     "stored": false
+     "stored": false,
+     "tokenized": false
    }
  },
  {
@@ -454,7 +455,8 @@ mod tests {
        "record": "basic",
        "tokenizer": "raw"
      },
-     "stored": false
+     "stored": false,
+     "tokenized": false
    }
  },
  {
@@ -9,6 +9,7 @@ use std::ops::BitOr;
pub struct TextOptions {
    indexing: Option<TextFieldIndexing>,
    stored: bool,
+    tokenized: bool,
}

impl TextOptions {
@@ -33,6 +34,17 @@ impl TextOptions {
        self.indexing = Some(indexing);
        self
    }
+
+    /// Returns true if the text is already tokenized in the form of a TokenizedString
+    pub fn is_tokenized(&self) -> bool {
+        self.tokenized
+    }
+
+    /// Sets the field as already tokenized
+    pub fn set_tokenized(mut self) -> TextOptions {
+        self.tokenized = true;
+        self
+    }
}

impl Default for TextOptions {
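Builder-style configuration works as well; a sketch assuming the pre-existing `set_indexing_options`/`TextFieldIndexing` builders (only `set_tokenized` is new in this patch):

use tantivy::schema::{IndexRecordOption, TextFieldIndexing, TextOptions};

fn main() {
    // roughly equivalent to TEXT | TOKENIZED
    let opts = TextOptions::default()
        .set_indexing_options(
            TextFieldIndexing::default()
                .set_tokenizer("default")
                .set_index_option(IndexRecordOption::WithFreqsAndPositions),
        )
        .set_tokenized();
    assert!(opts.is_tokenized());
    assert!(opts.get_indexing_options().is_some());
}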
@@ -40,6 +52,7 @@ impl Default for TextOptions {
        TextOptions {
            indexing: None,
            stored: false,
+            tokenized: false,
        }
    }
}
@@ -100,6 +113,7 @@ pub const STRING: TextOptions = TextOptions {
        record: IndexRecordOption::Basic,
    }),
    stored: false,
+    tokenized: false,
};

/// The field will be tokenized and indexed
@@ -109,6 +123,14 @@ pub const TEXT: TextOptions = TextOptions {
        record: IndexRecordOption::WithFreqsAndPositions,
    }),
    stored: false,
+    tokenized: false,
};

+/// The field is already tokenized, should come as TokenizedString
+pub const TOKENIZED: TextOptions = TextOptions {
+    indexing: None,
+    stored: false,
+    tokenized: true,
+};
+
impl<T: Into<TextOptions>> BitOr<T> for TextOptions {
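Since `TOKENIZED` carries no indexing options of its own, it is meant to be OR-ed into an existing configuration rather than used alone; for example:

use tantivy::schema::{TextOptions, STORED, TEXT, TOKENIZED};

fn main() {
    let opts: TextOptions = TEXT | STORED | TOKENIZED;
    assert!(opts.is_stored());
    assert!(opts.is_tokenized());
    // the indexing options come from TEXT; TOKENIZED itself sets none
    assert!(opts.get_indexing_options().is_some());
}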
@@ -119,6 +141,7 @@ impl<T: Into<TextOptions>> BitOr<T> for TextOptions {
        let mut res = TextOptions::default();
        res.indexing = self.indexing.or(other.indexing);
        res.stored = self.stored | other.stored;
+        res.tokenized = self.tokenized | other.tokenized;
        res
    }
}
@@ -134,6 +157,7 @@ impl From<StoredFlag> for TextOptions {
        TextOptions {
            indexing: None,
            stored: true,
+            tokenized: false,
        }
    }
}
@@ -158,8 +182,14 @@ mod tests {
        {
            let field_options = STORED | TEXT;
            assert!(field_options.is_stored());
+            assert!(!field_options.is_tokenized());
            assert!(field_options.get_indexing_options().is_some());
        }
        {
+            let field_options = STORED | TOKENIZED;
+            assert!(field_options.is_stored());
+            assert!(field_options.is_tokenized());
+        }
+        {
            let mut schema_builder = Schema::builder();
            schema_builder.add_text_field("body", TEXT);
@@ -1,4 +1,5 @@
use crate::schema::Facet;
+use crate::tokenizer::TokenizedString;
use crate::DateTime;
use serde::de::Visitor;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -10,6 +11,8 @@ use std::{cmp::Ordering, fmt};
pub enum Value {
    /// The str type is used for any text information.
    Str(String),
+    /// Tokenized str type,
+    TokStr(TokenizedString),
    /// Unsigned 64-bits Integer `u64`
    U64(u64),
    /// Signed 64-bits Integer `i64`
@@ -29,6 +32,7 @@ impl Ord for Value {
    fn cmp(&self, other: &Self) -> Ordering {
        match (self, other) {
            (Value::Str(l), Value::Str(r)) => l.cmp(r),
+            (Value::TokStr(l), Value::TokStr(r)) => l.cmp(r),
            (Value::U64(l), Value::U64(r)) => l.cmp(r),
            (Value::I64(l), Value::I64(r)) => l.cmp(r),
            (Value::Date(l), Value::Date(r)) => l.cmp(r),
@@ -44,6 +48,8 @@ impl Ord for Value {
            }
            (Value::Str(_), _) => Ordering::Less,
            (_, Value::Str(_)) => Ordering::Greater,
+            (Value::TokStr(_), _) => Ordering::Less,
+            (_, Value::TokStr(_)) => Ordering::Greater,
            (Value::U64(_), _) => Ordering::Less,
            (_, Value::U64(_)) => Ordering::Greater,
            (Value::I64(_), _) => Ordering::Less,
@@ -65,6 +71,7 @@ impl Serialize for Value {
    {
        match *self {
            Value::Str(ref v) => serializer.serialize_str(v),
+            Value::TokStr(ref v) => v.serialize(serializer),
            Value::U64(u) => serializer.serialize_u64(u),
            Value::I64(u) => serializer.serialize_i64(u),
            Value::F64(u) => serializer.serialize_f64(u),
@@ -221,6 +228,7 @@ mod binary_serialize {
    use super::Value;
    use crate::common::{f64_to_u64, u64_to_f64, BinarySerializable};
    use crate::schema::Facet;
+    use crate::tokenizer::TokenizedString;
    use chrono::{TimeZone, Utc};
    use std::io::{self, Read, Write};

@@ -231,6 +239,11 @@ mod binary_serialize {
    const BYTES_CODE: u8 = 4;
    const DATE_CODE: u8 = 5;
    const F64_CODE: u8 = 6;
+    const EXT_CODE: u8 = 7;

+    // extended types
+
+    const TOK_STR_CODE: u8 = 0;
+
    impl BinarySerializable for Value {
        fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
@@ -239,6 +252,18 @@ mod binary_serialize {
                TEXT_CODE.serialize(writer)?;
                text.serialize(writer)
            }
+            Value::TokStr(ref tok_str) => {
+                EXT_CODE.serialize(writer)?;
+                TOK_STR_CODE.serialize(writer)?;
+                if let Ok(text) = serde_json::to_string(tok_str) {
+                    text.serialize(writer)
+                } else {
+                    Err(io::Error::new(
+                        io::ErrorKind::Other,
+                        "Failed to dump Value::TokStr(_) to json.",
+                    ))
+                }
+            }
            Value::U64(ref val) => {
                U64_CODE.serialize(writer)?;
                val.serialize(writer)
@@ -290,6 +315,29 @@ mod binary_serialize {
            }
            HIERARCHICAL_FACET_CODE => Ok(Value::Facet(Facet::deserialize(reader)?)),
            BYTES_CODE => Ok(Value::Bytes(Vec::<u8>::deserialize(reader)?)),
+            EXT_CODE => {
+                let ext_type_code = u8::deserialize(reader)?;
+                match ext_type_code {
+                    TOK_STR_CODE => {
+                        let str_val = String::deserialize(reader)?;
+                        if let Ok(value) = serde_json::from_str::<TokenizedString>(&str_val) {
+                            Ok(Value::TokStr(value))
+                        } else {
+                            Err(io::Error::new(
+                                io::ErrorKind::Other,
+                                "Failed to parse string data as Value::TokStr(_).",
+                            ))
+                        }
+                    }
+                    _ => Err(io::Error::new(
+                        io::ErrorKind::InvalidData,
+                        format!(
+                            "No extended field type is associated with code {:?}",
+                            ext_type_code
+                        ),
+                    )),
+                }
+            }
            _ => Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("No field type is associated with code {:?}", type_code),
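The codec above stores a `Value::TokStr` as its `serde_json` string form behind the `EXT_CODE`/`TOK_STR_CODE` tags. A standalone sketch of that JSON round trip (assumes a direct `serde_json` dependency; not part of the patch):

use tantivy::tokenizer::{Token, TokenizedString};

fn main() -> serde_json::Result<()> {
    let tok = TokenizedString {
        text: "The Old Man".to_string(),
        tokens: vec![Token {
            offset_from: 0,
            offset_to: 3,
            position: 0,
            text: "The".to_string(),
            position_length: 1,
        }],
    };
    // this is the same shape Schema::parse_document accepts for TOKENIZED fields
    let json = serde_json::to_string(&tok)?;
    let back: TokenizedString = serde_json::from_str(&json)?;
    assert_eq!(tok, back);
    println!("{}", json);
    Ok(())
}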
@@ -136,6 +136,7 @@ mod simple_tokenizer;
mod stemmer;
mod stop_word_filter;
mod token_stream_chain;
+mod tokenized_string;
mod tokenizer;
mod tokenizer_manager;
@@ -152,7 +153,9 @@ pub use self::stop_word_filter::StopWordFilter;
pub(crate) use self::token_stream_chain::TokenStreamChain;
pub use self::tokenizer::BoxedTokenizer;
+
+pub use self::tokenized_string::{TokenizedStream, TokenizedString};
pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer};

pub use self::tokenizer_manager::TokenizerManager;

/// Maximum authorized len (in bytes) for a token.
src/tokenizer/tokenized_string.rs (new file, 188 lines)
@@ -0,0 +1,188 @@
use crate::tokenizer::{Token, TokenStream, TokenStreamChain};
use std::cmp::Ordering;

/// Struct representing tokenized text
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct TokenizedString {
    /// Original text
    pub text: String,
    /// Tokens derived from the text
    pub tokens: Vec<Token>,
}

impl Ord for TokenizedString {
    fn cmp(&self, other: &Self) -> Ordering {
        self.text.cmp(&other.text)
    }
}

impl PartialOrd for TokenizedString {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

/// TokenStream implementation which wraps TokenizedString
pub struct TokenizedStream {
    tokenized_string: TokenizedString,
    current_token: i64,
}

impl From<&TokenizedString> for TokenizedStream {
    fn from(s: &TokenizedString) -> TokenizedStream {
        TokenizedStream {
            tokenized_string: s.clone(),
            current_token: -1,
        }
    }
}

impl TokenizedStream {
    /// Creates a TokenStream from a TokenizedString array
    pub fn chain_tokenized_strings<'a>(
        tok_strings: &'a [&'a TokenizedString],
    ) -> Box<dyn TokenStream + 'a> {
        if tok_strings.len() == 1 {
            Box::new(TokenizedStream::from(tok_strings[0]))
        } else {
            let mut offsets = vec![];
            let mut total_offset = 0;
            for &tok_string in tok_strings {
                offsets.push(total_offset);
                let offset = match tok_string.tokens.last() {
                    Some(token) => token.offset_to,
                    None => 0,
                };
                total_offset += offset;
            }
            let token_streams: Vec<_> = tok_strings
                .iter()
                .map(|tok_string| TokenizedStream::from(*tok_string))
                .collect();
            Box::new(TokenStreamChain::new(offsets, token_streams))
        }
    }
}

impl TokenStream for TokenizedStream {
    fn advance(&mut self) -> bool {
        self.current_token += 1;
        self.current_token < self.tokenized_string.tokens.len() as i64
    }

    fn token(&self) -> &Token {
        if self.current_token < 0 {
            panic!("TokenStream not initialized. You should call advance() at least once.")
        }
        &self.tokenized_string.tokens[self.current_token as usize]
    }

    fn token_mut(&mut self) -> &mut Token {
        if self.current_token < 0 {
            panic!("TokenStream not initialized. You should call advance() at least once.")
        }
        &mut self.tokenized_string.tokens[self.current_token as usize]
    }
}

#[cfg(test)]
mod tests {

    use super::*;

    use crate::tokenizer::Token;

    #[test]
    fn test_tokenized_stream() {
        let tok_text = TokenizedString {
            text: String::from("A a"),
            tokens: vec![
                Token {
                    offset_from: 0,
                    offset_to: 1,
                    position: 0,
                    text: String::from("A"),
                    position_length: 1,
                },
                Token {
                    offset_from: 2,
                    offset_to: 3,
                    position: 1,
                    text: String::from("a"),
                    position_length: 1,
                },
            ],
        };

        let mut tok_stream = TokenizedStream::from(&tok_text);

        let mut i = 0;
        while tok_stream.advance() {
            assert!(*tok_stream.token() == tok_text.tokens[i]);
            i += 1;
        }
    }

    #[test]
    fn test_chain_tokenized_strings() {
        let tok_text = TokenizedString {
            text: String::from("A a"),
            tokens: vec![
                Token {
                    offset_from: 0,
                    offset_to: 1,
                    position: 0,
                    text: String::from("A"),
                    position_length: 1,
                },
                Token {
                    offset_from: 2,
                    offset_to: 3,
                    position: 1,
                    text: String::from("a"),
                    position_length: 1,
                },
            ],
        };

        let chain_parts = vec![&tok_text, &tok_text];

        let mut tok_stream = TokenizedStream::chain_tokenized_strings(&chain_parts[..]);

        let expected_tokens = vec![
            Token {
                offset_from: 0,
                offset_to: 1,
                position: 0,
                text: String::from("A"),
                position_length: 1,
            },
            Token {
                offset_from: 2,
                offset_to: 3,
                position: 1,
                text: String::from("a"),
                position_length: 1,
            },
            Token {
                offset_from: 3,
                offset_to: 4,
                position: 3,
                text: String::from("A"),
                position_length: 1,
            },
            Token {
                offset_from: 5,
                offset_to: 6,
                position: 4,
                text: String::from("a"),
                position_length: 1,
            },
        ];
        let mut i = 0;
        while tok_stream.advance() {
            assert!(*tok_stream.token() == expected_tokens[i]);
            i += 1;
        }
    }
}
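A usage sketch of the chaining helper from outside the crate, mirroring the test above: each part's offsets are shifted by the end offset of the previous part, and the underlying `TokenStreamChain` leaves a gap in positions between parts (hence the expected positions 0, 1, 3, 4 in the test):

use tantivy::tokenizer::{Token, TokenStream, TokenizedStream, TokenizedString};

fn main() {
    let part = TokenizedString {
        text: String::from("A a"),
        tokens: vec![
            Token {
                offset_from: 0,
                offset_to: 1,
                position: 0,
                text: String::from("A"),
                position_length: 1,
            },
            Token {
                offset_from: 2,
                offset_to: 3,
                position: 1,
                text: String::from("a"),
                position_length: 1,
            },
        ],
    };

    // two values of the same field, streamed as one chained token stream
    let parts = vec![&part, &part];
    let mut stream = TokenizedStream::chain_tokenized_strings(&parts[..]);
    while stream.advance() {
        let token = stream.token();
        println!("{:?} at position {}", token.text, token.position);
    }
}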
@@ -4,7 +4,7 @@ use crate::tokenizer::TokenStreamChain;
use std::borrow::{Borrow, BorrowMut};

/// Token
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct Token {
    /// Offset (byte index) of the first character of the token.
    /// Offsets shall not be modified by token filters.