Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2025-12-23 02:29:57 +00:00
clippy (#2700)
* clippy
* clippy
* clippy
* clippy + fmt

---------

Co-authored-by: Pascal Seitz <pascal.seitz@datadoghq.com>
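This is a mechanical lint-cleanup commit. The hunks below fall into five patterns: manual `(n + 7) / 8` ceiling divisions become `div_ceil`, `iter::repeat(x).take(n)` becomes `iter::repeat_n(x, n)`, nested `if let`/`if` blocks collapse into let-chains, `io::Error::new(io::ErrorKind::Other, ..)` becomes `io::Error::other(..)`, and needless let-and-return bindings are inlined.
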
@@ -48,7 +48,7 @@ impl BitPacker {
     pub fn flush<TWrite: io::Write + ?Sized>(&mut self, output: &mut TWrite) -> io::Result<()> {
         if self.mini_buffer_written > 0 {
-            let num_bytes = (self.mini_buffer_written + 7) / 8;
+            let num_bytes = self.mini_buffer_written.div_ceil(8);
             let bytes = self.mini_buffer.to_le_bytes();
             output.write_all(&bytes[..num_bytes])?;
             self.mini_buffer_written = 0;

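`div_ceil` replaces the manual round-up idiom here and in several hunks below. It states the intent directly and, unlike the manual form, cannot overflow near the type's maximum. A minimal sketch of the equivalence (`div_ceil` is stable on unsigned integers since Rust 1.73; the values are illustrative, not from the tantivy sources):

fn main() {
    // The two forms agree wherever the manual idiom is defined...
    for n in [0usize, 1, 7, 8, 9, 63, 64] {
        assert_eq!((n + 7) / 8, n.div_ceil(8));
    }
    // ...but the manual form breaks near the type maximum, while
    // `div_ceil` is defined over the whole range:
    assert_eq!(usize::MAX.div_ceil(8), usize::MAX / 8 + 1);
    // `(usize::MAX + 7) / 8` would panic in debug builds and wrap in release.
}
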
@@ -138,7 +138,7 @@ impl BitUnpacker {
         // We use `usize` here to avoid overflow issues.
         let end_bit_read = (end_idx as usize) * self.num_bits;
-        let end_byte_read = (end_bit_read + 7) / 8;
+        let end_byte_read = end_bit_read.div_ceil(8);
         assert!(
             end_byte_read <= data.len(),
             "Requested index is out of bounds."

@@ -140,10 +140,10 @@ impl BlockedBitpacker {
     pub fn iter(&self) -> impl Iterator<Item = u64> + '_ {
         // todo performance: we could decompress a whole block and cache it instead
         let bitpacked_elems = self.offset_and_bits.len() * BLOCK_SIZE;
-        let iter = (0..bitpacked_elems)
+        (0..bitpacked_elems)
             .map(move |idx| self.get(idx))
-            .chain(self.buffer.iter().cloned());
-        iter
+            .chain(self.buffer.iter().cloned())
     }
 }

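This one is clippy's let-and-return pattern: the iterator was bound to a temporary only to be returned on the next line, so the expression becomes the function's tail value instead. A standalone sketch of the same shape (names are illustrative):

// Before: `let iter = ...; iter`. After: the expression is returned directly.
fn evens_below(n: u64) -> impl Iterator<Item = u64> {
    (0..n).filter(|x| x % 2 == 0)
}

fn main() {
    assert_eq!(evens_below(7).count(), 4); // 0, 2, 4, 6
}
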
@@ -105,10 +105,11 @@ fn get_num_values_iterator<'a>(
 ) -> Box<dyn Iterator<Item = u32> + 'a> {
     match column_index {
         ColumnIndex::Empty { .. } => Box::new(std::iter::empty()),
-        ColumnIndex::Full => Box::new(std::iter::repeat(1u32).take(num_docs as usize)),
-        ColumnIndex::Optional(optional_index) => {
-            Box::new(std::iter::repeat(1u32).take(optional_index.num_non_nulls() as usize))
-        }
+        ColumnIndex::Full => Box::new(std::iter::repeat_n(1u32, num_docs as usize)),
+        ColumnIndex::Optional(optional_index) => Box::new(std::iter::repeat_n(
+            1u32,
+            optional_index.num_non_nulls() as usize,
+        )),
         ColumnIndex::Multivalued(multivalued_index) => Box::new(
             multivalued_index
                 .get_start_index_column()

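`std::iter::repeat_n(x, n)`, stable since Rust 1.82, is the direct replacement for `std::iter::repeat(x).take(n)`. The items are identical, but `repeat_n` also knows its exact length up front. A small sketch:

use std::iter;

fn main() {
    let old: Vec<u32> = iter::repeat(1u32).take(3).collect(); // old pattern
    let new: Vec<u32> = iter::repeat_n(1u32, 3).collect(); // clippy's suggestion
    assert_eq!(old, new);
    // `repeat_n` implements `ExactSizeIterator`; `repeat(..).take(..)` does not.
    assert_eq!(iter::repeat_n(1u32, 3).len(), 3);
}
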
@@ -185,11 +185,11 @@ impl CompactSpaceBuilder {
         let mut covered_space = Vec::with_capacity(self.blanks.len());
 
         // beginning of the blanks
-        if let Some(first_blank_start) = self.blanks.first().map(RangeInclusive::start) {
-            if *first_blank_start != 0 {
-                covered_space.push(0..=first_blank_start - 1);
-            }
-        }
+        if let Some(first_blank_start) = self.blanks.first().map(RangeInclusive::start)
+            && *first_blank_start != 0
+        {
+            covered_space.push(0..=first_blank_start - 1);
+        }
 
         // Between the blanks
         let between_blanks = self.blanks.iter().tuple_windows().map(|(left, right)| {

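This hunk and several below collapse a nested `if let` plus `if` into a let-chain, stabilized in Rust 1.88 under the 2024 edition. A minimal standalone sketch of the shape (function and data are illustrative):

// Requires Rust 1.88+ and edition 2024 for let-chains.
fn first_if_positive(xs: &[i64]) -> Option<i64> {
    if let Some(first) = xs.first()
        && *first > 0
    {
        return Some(*first);
    }
    None
}

fn main() {
    assert_eq!(first_if_positive(&[3, -1]), Some(3));
    assert_eq!(first_if_positive(&[-3, 1]), None);
    assert_eq!(first_if_positive(&[]), None);
}
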
@@ -202,11 +202,11 @@ impl CompactSpaceBuilder {
         covered_space.extend(between_blanks);
 
         // end of the blanks
-        if let Some(last_blank_end) = self.blanks.last().map(RangeInclusive::end) {
-            if *last_blank_end != u128::MAX {
-                covered_space.push(last_blank_end + 1..=u128::MAX);
-            }
-        }
+        if let Some(last_blank_end) = self.blanks.last().map(RangeInclusive::end)
+            && *last_blank_end != u128::MAX
+        {
+            covered_space.push(last_blank_end + 1..=u128::MAX);
+        }
 
         if covered_space.is_empty() {
             covered_space.push(0..=0); // empty data case

@@ -105,7 +105,7 @@ impl ColumnCodecEstimator for BitpackedCodecEstimator {
 
     fn estimate(&self, stats: &ColumnStats) -> Option<u64> {
         let num_bits_per_value = num_bits(stats);
-        Some(stats.num_bytes() + (stats.num_rows as u64 * (num_bits_per_value as u64) + 7) / 8)
+        Some(stats.num_bytes() + (stats.num_rows as u64 * (num_bits_per_value as u64)).div_ceil(8))
     }
 
     fn serialize(

@@ -117,7 +117,7 @@ impl ColumnCodecEstimator for LinearCodecEstimator {
         Some(
             stats.num_bytes()
                 + linear_params.num_bytes()
-                + (num_bits as u64 * stats.num_rows as u64 + 7) / 8,
+                + (num_bits as u64 * stats.num_rows as u64).div_ceil(8),
         )
     }

@@ -244,7 +244,7 @@ impl SymbolValue for UnorderedId {
 
 fn compute_num_bytes_for_u64(val: u64) -> usize {
     let msb = (64u32 - val.leading_zeros()) as usize;
-    (msb + 7) / 8
+    msb.div_ceil(8)
 }
 
 fn encode_zig_zag(n: i64) -> u64 {

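The rewritten function is small enough to check by hand: `msb` is the position of the highest set bit (zero when `val == 0`), and the ceiling division converts bits to bytes. A few spot checks, reusing the body exactly as it reads after this change:

fn compute_num_bytes_for_u64(val: u64) -> usize {
    let msb = (64u32 - val.leading_zeros()) as usize;
    msb.div_ceil(8)
}

fn main() {
    assert_eq!(compute_num_bytes_for_u64(0), 0); // no significant bits
    assert_eq!(compute_num_bytes_for_u64(255), 1); // 8 bits fit in one byte
    assert_eq!(compute_num_bytes_for_u64(256), 2); // 9 bits need two bytes
    assert_eq!(compute_num_bytes_for_u64(u64::MAX), 8); // all 64 bits
}
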
@@ -183,7 +183,7 @@ pub struct BitSet {
 }
 
 fn num_buckets(max_val: u32) -> u32 {
-    (max_val + 63u32) / 64u32
+    max_val.div_ceil(64u32)
 }
 
 impl BitSet {

@@ -305,15 +305,14 @@ fn term_group_infallible(inp: &str) -> JResult<&str, UserInputAst> {
     let (inp, (field_name, _, _, _)) =
         tuple((field_name, multispace0, char('('), multispace0))(inp).expect("precondition failed");
 
-    let res = delimited_infallible(
+    delimited_infallible(
         nothing,
         map(ast_infallible, |(mut ast, errors)| {
             ast.set_default_field(field_name.to_string());
             (ast, errors)
         }),
         opt_i_err(char(')'), "expected ')'"),
-    )(inp);
-    res
+    )(inp)
 }
 
 fn exists(inp: &str) -> IResult<&str, UserInputLeaf> {

@@ -308,10 +308,9 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
             }
         }
         _ => {
-            return Err(io::Error::new(
-                io::ErrorKind::Other,
-                format!("Unsupported sstable version, expected one of [2, 3], found {version}"),
-            ));
+            return Err(io::Error::other(format!(
+                "Unsupported sstable version, expected one of [2, 3], found {version}"
+            )));
         }
     };

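`io::Error::other(e)`, stable since Rust 1.74, is shorthand for `io::Error::new(io::ErrorKind::Other, e)`; the lint involved here appears to be clippy's `io_other_error`. A quick check that the two forms agree (the version value is illustrative):

use std::io;

fn main() {
    let version = 7;
    let long = io::Error::new(
        io::ErrorKind::Other,
        format!("Unsupported sstable version, expected one of [2, 3], found {version}"),
    );
    let short = io::Error::other(format!(
        "Unsupported sstable version, expected one of [2, 3], found {version}"
    ));
    assert_eq!(long.kind(), short.kind());
    assert_eq!(long.to_string(), short.to_string());
}
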
@@ -54,15 +54,15 @@ pub fn merge_sstable<SST: SSTable, W: io::Write, M: ValueMerger<SST::Value>>(
             }
         }
         for _ in 0..len - 1 {
-            if let Some(mut head) = heap.peek_mut() {
-                if head.0.key() == writer.last_inserted_key() {
-                    value_merger.add(head.0.value());
-                    if !head.0.advance()? {
-                        PeekMut::pop(head);
-                    }
-                    continue;
-                }
-            }
+            if let Some(mut head) = heap.peek_mut()
+                && head.0.key() == writer.last_inserted_key()
+            {
+                value_merger.add(head.0.value());
+                if !head.0.advance()? {
+                    PeekMut::pop(head);
+                }
+                continue;
+            }
             break;
         }
         let value = value_merger.finish();

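The merge loop leans on `BinaryHeap::peek_mut`: the returned `PeekMut` guard lets the code advance the top entry in place and pop it via `PeekMut::pop` without a second heap lookup. A minimal sketch of that pattern on plain integers, written as a let-chain like the code above (values are illustrative):

use std::collections::binary_heap::PeekMut;
use std::collections::BinaryHeap;

fn main() {
    let mut heap = BinaryHeap::from([3, 1, 2]);
    // Inspect the max element and pop it only if it matches a condition.
    if let Some(head) = heap.peek_mut()
        && *head == 3
    {
        PeekMut::pop(head);
    }
    assert_eq!(heap.peek(), Some(&2));
}
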
@@ -394,7 +394,7 @@ impl SSTableIndexBuilder {
 
 fn fst_error_to_io_error(error: tantivy_fst::Error) -> io::Error {
     match error {
-        tantivy_fst::Error::Fst(fst_error) => io::Error::new(io::ErrorKind::Other, fst_error),
+        tantivy_fst::Error::Fst(fst_error) => io::Error::other(fst_error),
         tantivy_fst::Error::Io(ioerror) => ioerror,
     }
 }

@@ -438,7 +438,7 @@ impl BlockAddrBlockMetadata {
         let ordinal_addr = range_start_addr + self.range_start_nbits as usize;
         let range_end_addr = range_start_addr + num_bits;
 
-        if (range_end_addr + self.range_start_nbits as usize + 7) / 8 > data.len() {
+        if (range_end_addr + self.range_start_nbits as usize).div_ceil(8) > data.len() {
             return None;
         }

@@ -274,8 +274,8 @@ impl SharedArenaHashMap {
             let kv: KeyValue = self.table[bucket];
             if kv.is_empty() {
                 return None;
-            } else if kv.hash == hash {
-                if let Some(val_addr) =
+            } else if kv.hash == hash
+                && let Some(val_addr) =
                     self.get_value_addr_if_key_match(key, kv.key_value_addr, memory_arena)
             {
                 let v = memory_arena.read(val_addr);
@@ -283,7 +283,6 @@ impl SharedArenaHashMap {
                 }
-            }
         }
     }
 
     /// `update` create a new entry for a given key if it does not exist
     /// or updates the existing entry.

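Note the operand order after the rewrite: the cheap `kv.hash == hash` comparison runs first, and the `let` binding is evaluated only when it passes. A standalone sketch of the `cond && let` shape (types and names are illustrative):

// Requires Rust 1.88+ and edition 2024 for let-chains.
fn get(entry: (u64, Option<u32>), hash: u64) -> Option<u32> {
    let (entry_hash, value) = entry;
    if entry_hash == hash
        && let Some(v) = value
    {
        return Some(v);
    }
    None
}

fn main() {
    assert_eq!(get((42, Some(7)), 42), Some(7));
    assert_eq!(get((42, Some(7)), 41), None);
    assert_eq!(get((42, None), 42), None);
}
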
@@ -334,8 +333,8 @@ impl SharedArenaHashMap {
                 self.set_bucket(hash, key_addr, bucket);
                 return val;
             }
-            if kv.hash == hash {
-                if let Some(val_addr) =
+            if kv.hash == hash
+                && let Some(val_addr) =
                     self.get_value_addr_if_key_match(key, kv.key_value_addr, memory_arena)
             {
                 let v = memory_arena.read(val_addr);
@@ -343,7 +342,6 @@ impl SharedArenaHashMap {
                 memory_arena.write_at(val_addr, new_v);
                 return new_v;
             }
-            }
             // This allows fetching the next bucket before the loop jmp
             bucket = probe.next_probe();
             kv = self.table[bucket];