From 03345f0aa222331f2a9bfb7cd4ff4e370991dd98 Mon Sep 17 00:00:00 2001
From: PSeitz
Date: Fri, 10 Feb 2023 00:42:32 +0800
Subject: [PATCH] fmt code, update lz4_flex (#1838)

formatting on nightly changed
---
 Cargo.toml                    |  2 +-
 src/indexer/segment_writer.rs | 31 +++++++++++++++++--------------
 2 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 1acc2076c..aa9ef5f57 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -23,7 +23,7 @@ regex = { version = "1.5.5", default-features = false, features = ["std", "unico
 aho-corasick = "0.7"
 tantivy-fst = "0.4.0"
 memmap2 = { version = "0.5.3", optional = true }
-lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
+lz4_flex = { version = "0.10", default-features = false, features = ["checked-decode"], optional = true }
 brotli = { version = "3.3.4", optional = true }
 zstd = { version = "0.12", optional = true, default-features = false }
 snap = { version = "1.0.5", optional = true }
diff --git a/src/indexer/segment_writer.rs b/src/indexer/segment_writer.rs
index f0471aa15..ecffa3bf8 100644
--- a/src/indexer/segment_writer.rs
+++ b/src/indexer/segment_writer.rs
@@ -819,20 +819,23 @@ mod tests {
         // This is a bit of a contrived example.
         let tokens = PreTokenizedString {
             text: "contrived-example".to_string(), //< I can't think of a use case where this corner case happens in real life.
-            tokens: vec![Token { // Not the last token, yet ends after the last token.
-                offset_from: 0,
-                offset_to: 14,
-                position: 0,
-                text: "long_token".to_string(),
-                position_length: 3,
-            },
-            Token {
-                offset_from: 0,
-                offset_to: 14,
-                position: 1,
-                text: "short".to_string(),
-                position_length: 1,
-            }],
+            tokens: vec![
+                Token {
+                    // Not the last token, yet ends after the last token.
+                    offset_from: 0,
+                    offset_to: 14,
+                    position: 0,
+                    text: "long_token".to_string(),
+                    position_length: 3,
+                },
+                Token {
+                    offset_from: 0,
+                    offset_to: 14,
+                    position: 1,
+                    text: "short".to_string(),
+                    position_length: 1,
+                },
+            ],
         };
         doc.add_pre_tokenized_text(text, tokens);
         doc.add_text(text, "hello");
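
Note on the dependency bump above: lz4_flex 0.10 keeps the block-mode entry
points used here, and the `checked-decode` feature kept in Cargo.toml makes
decompression validate offsets in the compressed input instead of trusting
them. A minimal round-trip sketch follows; `compress_prepend_size` and
`decompress_size_prepended` are lz4_flex's public block API, while the `main`
wrapper and payload bytes are illustrative only.

use lz4_flex::{compress_prepend_size, decompress_size_prepended};

fn main() {
    // Illustrative payload; in tantivy this would be a doc-store block.
    let input = b"tantivy doc store block";
    // Prepends the uncompressed length so the decoder can size its buffer.
    let compressed = compress_prepend_size(input);
    // With `checked-decode` enabled, a corrupt block yields an Err rather
    // than reading out of bounds.
    let decompressed = decompress_size_prepended(&compressed)
        .expect("corrupt lz4 block");
    assert_eq!(&decompressed[..], &input[..]);
}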