fix clippy (#1725)

* fix clippy

* fix clippy fastfield codecs

* fix clippy bitpacker

* fix clippy common

* fix clippy stacker

* fix clippy sstable

* fmt

Author: PSeitz
Date: 2022-12-20 07:30:06 +01:00 (committed by GitHub)
Parent: a2cf6a79b4
Commit: f9171a3981

42 changed files with 81 additions and 102 deletions
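
The hunks below are almost entirely mechanical: dropping no-op casts, borrowing less, and switching to more direct stdlib calls. In the diff view that follows, lines prefixed with - show the code before the change and lines prefixed with + the code after it. A fix-up commit like this is typically produced by running clippy over the whole workspace and applying its suggestions; the flags shown here are the conventional ones, not taken from this PR:

    cargo clippy --workspace --all-targets -- -D warnings
    cargo fmt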

View File

@@ -25,15 +25,14 @@ impl BitPacker {
num_bits: u8,
output: &mut TWrite,
) -> io::Result<()> {
- let val_u64 = val as u64;
let num_bits = num_bits as usize;
if self.mini_buffer_written + num_bits > 64 {
- self.mini_buffer |= val_u64.wrapping_shl(self.mini_buffer_written as u32);
+ self.mini_buffer |= val.wrapping_shl(self.mini_buffer_written as u32);
output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
- self.mini_buffer = val_u64.wrapping_shr((64 - self.mini_buffer_written) as u32);
+ self.mini_buffer = val.wrapping_shr((64 - self.mini_buffer_written) as u32);
self.mini_buffer_written = self.mini_buffer_written + num_bits - 64;
} else {
- self.mini_buffer |= val_u64 << self.mini_buffer_written;
+ self.mini_buffer |= val << self.mini_buffer_written;
self.mini_buffer_written += num_bits;
if self.mini_buffer_written == 64 {
output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
@@ -102,7 +101,7 @@ impl BitUnpacker {
.try_into()
.unwrap();
let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
- let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
+ let val_shifted = (val_unshifted_unmasked >> bit_shift);
val_shifted & self.mask
}
}
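
Most changes in this commit follow the pattern of clippy's unnecessary_cast lint: a value cast to the type it already has. A minimal standalone sketch of the pattern, with illustrative values rather than code from this file:

    fn main() {
        let val: u64 = 42;
        let num_bits: usize = 7;
        // flagged by clippy::unnecessary_cast: casting u64 to u64
        // (or usize to usize) is a no-op
        // let shifted = (val >> 3) as u64;
        // let n = num_bits as usize;
        let shifted = val >> 3; // the fix is simply to drop the cast
        let n = num_bits;
        println!("{shifted} {n}");
    }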

View File

@@ -84,7 +84,7 @@ impl BlockedBitpacker {
#[inline]
pub fn add(&mut self, val: u64) {
self.buffer.push(val);
- if self.buffer.len() == BLOCK_SIZE as usize {
+ if self.buffer.len() == BLOCK_SIZE {
self.flush();
}
}
@@ -126,8 +126,8 @@ impl BlockedBitpacker {
}
#[inline]
pub fn get(&self, idx: usize) -> u64 {
- let metadata_pos = idx / BLOCK_SIZE as usize;
- let pos_in_block = idx % BLOCK_SIZE as usize;
+ let metadata_pos = idx / BLOCK_SIZE;
+ let pos_in_block = idx % BLOCK_SIZE;
if let Some(metadata) = self.offset_and_bits.get(metadata_pos) {
let unpacked = BitUnpacker::new(metadata.num_bits()).get(
pos_in_block as u32,

View File

@@ -151,7 +151,7 @@ impl TinySet {
if self.is_empty() {
None
} else {
- let lowest = self.0.trailing_zeros() as u32;
+ let lowest = self.0.trailing_zeros();
self.0 ^= TinySet::singleton(lowest).0;
Some(lowest)
}
@@ -421,7 +421,7 @@ mod tests {
bitset.serialize(&mut out).unwrap();
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
- assert_eq!(bitset.len() as usize, i as usize);
+ assert_eq!(bitset.len(), i as usize);
}
}
@@ -432,7 +432,7 @@ mod tests {
bitset.serialize(&mut out).unwrap();
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
- assert_eq!(bitset.len() as usize, 64);
+ assert_eq!(bitset.len(), 64);
}
#[test]

View File

@@ -75,7 +75,7 @@ impl FastFieldCodec for BlockwiseLinearCodec {
if column.num_vals() < 10 * CHUNK_SIZE as u32 {
return None;
}
- let mut first_chunk: Vec<u64> = column.iter().take(CHUNK_SIZE as usize).collect();
+ let mut first_chunk: Vec<u64> = column.iter().take(CHUNK_SIZE).collect();
let line = Line::train(&VecColumn::from(&first_chunk));
for (i, buffer_val) in first_chunk.iter_mut().enumerate() {
let interpolated_val = line.eval(i as u32);

View File

@@ -208,7 +208,7 @@ impl CompactSpaceBuilder {
};
let covered_range_len = range_mapping.range_length();
ranges_mapping.push(range_mapping);
- compact_start += covered_range_len as u64;
+ compact_start += covered_range_len;
}
// println!("num ranges {}", ranges_mapping.len());
CompactSpace { ranges_mapping }

View File

@@ -97,7 +97,7 @@ impl BinarySerializable for CompactSpace {
};
let range_length = range_mapping.range_length();
ranges_mapping.push(range_mapping);
- compact_start += range_length as u64;
+ compact_start += range_length;
}
Ok(Self { ranges_mapping })
@@ -407,10 +407,10 @@ impl CompactSpaceDecompressor {
let idx2 = idx + 1;
let idx3 = idx + 2;
let idx4 = idx + 3;
- let val1 = get_val(idx1 as u32);
- let val2 = get_val(idx2 as u32);
- let val3 = get_val(idx3 as u32);
- let val4 = get_val(idx4 as u32);
+ let val1 = get_val(idx1);
+ let val2 = get_val(idx2);
+ let val3 = get_val(idx3);
+ let val4 = get_val(idx4);
push_if_in_range(idx1, val1);
push_if_in_range(idx2, val2);
push_if_in_range(idx3, val3);
@@ -419,14 +419,13 @@ impl CompactSpaceDecompressor {
// handle rest
for idx in cutoff..position_range.end {
- push_if_in_range(idx, get_val(idx as u32));
+ push_if_in_range(idx, get_val(idx));
}
}
#[inline]
fn iter_compact(&self) -> impl Iterator<Item = u64> + '_ {
- (0..self.params.num_vals)
- .map(move |idx| self.params.bit_unpacker.get(idx, &self.data) as u64)
+ (0..self.params.num_vals).map(move |idx| self.params.bit_unpacker.get(idx, &self.data))
}
#[inline]
@@ -569,7 +568,7 @@ mod tests {
let decomp = CompactSpaceDecompressor::open(data).unwrap();
let complete_range = 0..vals.len() as u32;
for (pos, val) in vals.iter().enumerate() {
- let val = *val as u128;
+ let val = *val;
let pos = pos as u32;
let mut positions = Vec::new();
decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);
@@ -666,7 +665,7 @@ mod tests {
get_positions_for_value_range_helper(
&decomp,
4_000_211_221u128..=5_000_000_000u128,
- complete_range.clone()
+ complete_range
),
vec![6, 7]
);
@@ -703,7 +702,7 @@ mod tests {
vec![0]
);
assert_eq!(
- get_positions_for_value_range_helper(&decomp, 0..=105, complete_range.clone()),
+ get_positions_for_value_range_helper(&decomp, 0..=105, complete_range),
vec![0]
);
}
@@ -756,11 +755,7 @@ mod tests {
);
assert_eq!(
- get_positions_for_value_range_helper(
- &*decomp,
- 1_000_000..=1_000_000,
- complete_range.clone()
- ),
+ get_positions_for_value_range_helper(&*decomp, 1_000_000..=1_000_000, complete_range),
vec![11]
);
}

View File

@@ -401,7 +401,7 @@ impl SegmentHistogramCollector {
debug_assert_eq!(
self.buckets[bucket_pos].key,
- get_bucket_val(val, self.interval, self.offset) as f64
+ get_bucket_val(val, self.interval, self.offset)
);
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
}
@@ -428,7 +428,7 @@ impl SegmentHistogramCollector {
if bounds.contains(val) {
debug_assert_eq!(
self.buckets[bucket_pos].key,
- get_bucket_val(val, self.interval, self.offset) as f64
+ get_bucket_val(val, self.interval, self.offset)
);
self.increment_bucket(bucket_pos, doc, bucket_with_accessor)?;

View File

@@ -282,8 +282,8 @@ impl IntermediateBucketResult {
IntermediateBucketResult::Range(range_res) => {
let mut buckets: Vec<RangeBucketEntry> = range_res
.buckets
- .into_iter()
- .map(|(_, bucket)| {
+ .into_values()
+ .map(|bucket| {
bucket.into_final_bucket_entry(
&req.sub_aggregation,
schema,
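
This hunk appears to match clippy's iter_kv_map lint (new in Rust 1.66, which lines up with the commit date): when a map is consumed and the key is discarded, into_values() is more direct than into_iter() plus tuple destructuring. A small sketch under that assumption:

    use std::collections::HashMap;

    fn main() {
        let buckets: HashMap<u32, &str> = HashMap::from([(1, "a"), (2, "b")]);
        // before: buckets.into_iter().map(|(_, bucket)| bucket).collect()
        let values: Vec<&str> = buckets.into_values().collect();
        assert_eq!(values.len(), 2);
    }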

View File

@@ -451,9 +451,9 @@ mod tests {
text_field_id => term.to_string(),
string_field_id => term.to_string(),
score_field => i as u64,
- score_field_f64 => i as f64,
+ score_field_f64 => i,
score_field_i64 => i as i64,
- fraction_field => i as f64/100.0,
+ fraction_field => i/100.0,
))?;
}
index_writer.commit()?;

View File

@@ -305,7 +305,7 @@ impl BucketCount {
}
pub(crate) fn add_count(&self, count: u32) {
self.bucket_count
- .fetch_add(count as u32, std::sync::atomic::Ordering::Relaxed);
+ .fetch_add(count, std::sync::atomic::Ordering::Relaxed);
}
pub(crate) fn get_count(&self) -> u32 {
self.bucket_count.load(std::sync::atomic::Ordering::Relaxed)

View File

@@ -357,7 +357,7 @@ impl SegmentCollector for FacetSegmentCollector {
let mut facet = vec![];
let facet_ord = self.collapse_facet_ords[collapsed_facet_ord];
// TODO handle errors.
- if facet_dict.ord_to_term(facet_ord as u64, &mut facet).is_ok() {
+ if facet_dict.ord_to_term(facet_ord, &mut facet).is_ok() {
if let Ok(facet) = Facet::from_encoded(facet) {
facet_counts.insert(facet, count);
}

View File

@@ -170,7 +170,7 @@ pub trait Collector: Sync + Send {
segment_ord: u32,
reader: &SegmentReader,
) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
- let mut segment_collector = self.for_segment(segment_ord as u32, reader)?;
+ let mut segment_collector = self.for_segment(segment_ord, reader)?;
match (reader.alive_bitset(), self.requires_scoring()) {
(Some(alive_bitset), true) => {

View File

@@ -813,7 +813,7 @@ mod tests {
let field = schema.get_field("num_likes").unwrap();
let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
- let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
+ let index = Index::create_in_dir(tempdir_path, schema).unwrap();
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)

View File

@@ -75,7 +75,7 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
let mut prev_offset = 0;
for (file_addr, offset) in self.offsets {
- VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
+ VInt(offset - prev_offset).serialize(&mut self.write)?;
file_addr.serialize(&mut self.write)?;
prev_offset = offset;
}

View File

@@ -38,7 +38,7 @@ impl Footer {
counting_write.write_all(serde_json::to_string(&self)?.as_ref())?;
let footer_payload_len = counting_write.written_bytes();
BinarySerializable::serialize(&(footer_payload_len as u32), write)?;
- BinarySerializable::serialize(&(FOOTER_MAGIC_NUMBER as u32), write)?;
+ BinarySerializable::serialize(&FOOTER_MAGIC_NUMBER, write)?;
Ok(())
}
@@ -90,9 +90,10 @@ impl Footer {
));
}
- let footer: Footer = serde_json::from_slice(&file.read_bytes_slice(
- file.len() - total_footer_size..file.len() - footer_metadata_len as usize,
- )?)?;
+ let footer: Footer =
+ serde_json::from_slice(&file.read_bytes_slice(
+ file.len() - total_footer_size..file.len() - footer_metadata_len,
+ )?)?;
let body = file.slice_to(file.len() - total_footer_size);
Ok((footer, body))

View File

@@ -388,7 +388,7 @@ mod tests_mmap_specific {
let tempdir_path = PathBuf::from(tempdir.path());
let living_files = HashSet::new();
- let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
+ let mmap_directory = MmapDirectory::open(tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(Box::new(mmap_directory)).unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap();

View File

@@ -341,7 +341,7 @@ impl Directory for MmapDirectory {
/// removed before the file is deleted.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
let full_path = self.resolve_path(path);
- fs::remove_file(&full_path).map_err(|e| {
+ fs::remove_file(full_path).map_err(|e| {
if e.kind() == io::ErrorKind::NotFound {
DeleteError::FileDoesNotExist(path.to_owned())
} else {
@@ -395,7 +395,7 @@ impl Directory for MmapDirectory {
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let full_path = self.resolve_path(path);
let mut buffer = Vec::new();
- match File::open(&full_path) {
+ match File::open(full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer).map_err(|io_error| {
OpenReadError::wrap_io_error(io_error, path.to_path_buf())
@@ -425,7 +425,7 @@ impl Directory for MmapDirectory {
let file: File = OpenOptions::new()
.write(true)
.create(true) //< if the file does not exist yet, create it.
- .open(&full_path)
+ .open(full_path)
.map_err(LockError::wrap_io_error)?;
if lock.is_blocking {
file.lock_exclusive().map_err(LockError::wrap_io_error)?;
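
The &full_path to full_path changes here (and the &tempdir_path change in the previous file) are clippy's needless_borrow lint of that era: these functions take paths via AsRef<Path>, so borrowing an owned value that is not used afterwards is redundant. A minimal sketch:

    use std::fs;
    use std::path::PathBuf;

    // fs::remove_file takes P: AsRef<Path>; passing the owned PathBuf
    // directly avoids the needless &full_path borrow that clippy flags.
    fn delete(full_path: PathBuf) -> std::io::Result<()> {
        fs::remove_file(full_path)
    }

    fn main() {
        let _ = delete(PathBuf::from("/tmp/does-not-exist"));
    }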

View File

@@ -64,9 +64,7 @@ impl FacetReader {
facet_ord: TermOrdinal,
output: &mut Facet,
) -> crate::Result<()> {
- let found_term = self
- .term_dict
- .ord_to_term(facet_ord as u64, &mut self.buffer)?;
+ let found_term = self.term_dict.ord_to_term(facet_ord, &mut self.buffer)?;
assert!(found_term, "Term ordinal {} no found.", facet_ord);
let facet_str = str::from_utf8(&self.buffer[..])
.map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?;

View File

@@ -473,7 +473,7 @@ mod tests {
let fast_field_reader = open::<u64>(data)?;
for a in 0..n {
- assert_eq!(fast_field_reader.get_val(a as u32), permutation[a as usize]);
+ assert_eq!(fast_field_reader.get_val(a as u32), permutation[a]);
}
}
Ok(())

View File

@@ -264,7 +264,7 @@ fn iter_remapped_multivalue_index<'a, C: Column>(
std::iter::once(0).chain(doc_id_map.iter_old_doc_ids().map(move |old_doc| {
let num_vals_for_doc = column.get_val(old_doc + 1) - column.get_val(old_doc);
offset += num_vals_for_doc;
- offset as u64
+ offset
}))
}

View File

@@ -360,20 +360,10 @@ impl U128FastFieldWriter {
.map(|idx| self.vals[idx as usize])
};
- serializer.create_u128_fast_field_with_idx(
- self.field,
- iter_gen,
- self.val_count as u32,
- 0,
- )?;
+ serializer.create_u128_fast_field_with_idx(self.field, iter_gen, self.val_count, 0)?;
} else {
let iter_gen = || self.vals.iter().cloned();
- serializer.create_u128_fast_field_with_idx(
- self.field,
- iter_gen,
- self.val_count as u32,
- 0,
- )?;
+ serializer.create_u128_fast_field_with_idx(self.field, iter_gen, self.val_count, 0)?;
}
Ok(())

View File

@@ -252,8 +252,8 @@ mod tests {
&demux_mapping,
target_settings,
vec![
- Box::new(RamDirectory::default()),
- Box::new(RamDirectory::default()),
+ Box::<RamDirectory>::default(),
+ Box::<RamDirectory>::default(),
],
)?;
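
The Box::new(T::default()) to Box::<T>::default() rewrites here and in later files correspond to clippy's box_default lint (also introduced around Rust 1.66). Both forms allocate the same value; the turbofish form drops the nested call and still coerces to Box<dyn Trait> where needed. A sketch with a hypothetical stand-in type, since the real RamDirectory lives in tantivy:

    #[derive(Default)]
    struct RamDirectoryStandIn; // stand-in for tantivy's RamDirectory

    fn main() {
        // before: let dir = Box::new(RamDirectoryStandIn::default());
        let dir = Box::<RamDirectoryStandIn>::default();
        let _ = dir;
    }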

View File

@@ -152,7 +152,7 @@ pub(crate) fn advance_deletes(
let num_deleted_docs = max_doc - num_alive_docs;
if num_deleted_docs > num_deleted_docs_before {
// There are new deletes. We need to write a new delete file.
- segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
+ segment = segment.with_delete_meta(num_deleted_docs, target_opstamp);
let mut alive_doc_file = segment.open_write(SegmentComponent::Delete)?;
write_alive_bitset(&alive_bitset, &mut alive_doc_file)?;
alive_doc_file.terminate()?;
@@ -984,7 +984,7 @@ mod tests {
"LogMergePolicy { min_num_segments: 8, max_docs_before_merge: 10000000, \
min_layer_size: 10000, level_log_size: 0.75, del_docs_ratio_before_merge: 1.0 }"
);
- let merge_policy = Box::new(NoMergePolicy::default());
+ let merge_policy = Box::<NoMergePolicy>::default();
index_writer.set_merge_policy(merge_policy);
assert_eq!(
format!("{:?}", index_writer.get_merge_policy()),
@@ -1813,8 +1813,8 @@ mod tests {
}
let num_docs_expected = expected_ids_and_num_occurrences
- .iter()
- .map(|(_, id_occurrences)| *id_occurrences as usize)
+ .values()
+ .map(|id_occurrences| *id_occurrences as usize)
.sum::<usize>();
assert_eq!(searcher.num_docs() as usize, num_docs_expected);
assert_eq!(old_searcher.num_docs() as usize, num_docs_expected);

View File

@@ -366,7 +366,7 @@ impl IndexMerger {
.map(|doc| reader.num_vals(doc))
.sum()
} else {
- reader.total_num_vals() as u32
+ reader.total_num_vals()
}
})
.sum();
@@ -968,7 +968,7 @@ impl IndexMerger {
let doc_bytes = doc_bytes_res?;
store_writer.store_bytes(&doc_bytes)?;
} else {
- return Err(DataCorruption::comment_only(&format!(
+ return Err(DataCorruption::comment_only(format!(
"unexpected missing document in docstore on merge, doc address \
{old_doc_addr:?}",
))

View File

@@ -866,7 +866,7 @@ mod tests {
}
assert_eq!(indices.len(), 3);
- let output_directory: Box<dyn Directory> = Box::new(RamDirectory::default());
+ let output_directory: Box<dyn Directory> = Box::<RamDirectory>::default();
let index = merge_indices(&indices, output_directory)?;
assert_eq!(index.schema(), schema);

View File

@@ -16,11 +16,11 @@ mod atomic_impl {
impl AtomicU64Wrapper {
pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
- AtomicU64Wrapper(AtomicU64::new(first_opstamp as u64))
+ AtomicU64Wrapper(AtomicU64::new(first_opstamp))
}
pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
- self.0.fetch_add(val as u64, order) as u64
+ self.0.fetch_add(val, order) as u64
}
pub fn revert(&self, val: u64, order: Ordering) -> u64 {
@@ -77,7 +77,7 @@ impl Stamper {
}
pub fn stamp(&self) -> Opstamp {
- self.0.fetch_add(1u64, Ordering::SeqCst) as u64
+ self.0.fetch_add(1u64, Ordering::SeqCst)
}
/// Given a desired count `n`, `stamps` returns an iterator that

View File

@@ -177,7 +177,7 @@ impl DateTime {
/// The given date/time is converted to UTC and the actual
/// time zone is discarded.
pub const fn from_utc(dt: OffsetDateTime) -> Self {
- let timestamp_micros = dt.unix_timestamp() as i64 * 1_000_000 + dt.microsecond() as i64;
+ let timestamp_micros = dt.unix_timestamp() * 1_000_000 + dt.microsecond() as i64;
Self { timestamp_micros }
}

View File

@@ -71,7 +71,7 @@ impl PositionReader {
.map(|num_bits| num_bits as usize)
.sum();
let num_bytes_to_skip = num_bits * COMPRESSION_BLOCK_SIZE / 8;
- self.bit_widths.advance(num_blocks as usize);
+ self.bit_widths.advance(num_blocks);
self.positions.advance(num_bytes_to_skip);
self.anchor_offset += (num_blocks * COMPRESSION_BLOCK_SIZE) as u64;
}

View File

@@ -51,7 +51,7 @@ fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> Box<dyn Postings
| FieldType::Date(_)
| FieldType::Bytes(_)
| FieldType::IpAddr(_)
- | FieldType::Facet(_) => Box::new(SpecializedPostingsWriter::<DocIdRecorder>::default()),
+ | FieldType::Facet(_) => Box::<SpecializedPostingsWriter<DocIdRecorder>>::default(),
FieldType::JsonObject(ref json_object_options) => {
if let Some(text_indexing_option) = json_object_options.get_text_indexing_options() {
match text_indexing_option.index_option() {

View File

@@ -465,7 +465,7 @@ impl<W: Write> PostingsSerializer<W> {
/// When called after writing the postings of a term, this value is used as a
/// end offset.
fn written_bytes(&self) -> u64 {
- self.output_write.written_bytes() as u64
+ self.output_write.written_bytes()
}
fn clear(&mut self) {

View File

@@ -47,7 +47,7 @@ impl From<BitSet> for BitSetDocSet {
impl DocSet for BitSetDocSet {
fn advance(&mut self) -> DocId {
if let Some(lower) = self.cursor_tinybitset.pop_lowest() {
- self.doc = (self.cursor_bucket as u32 * 64u32) | lower;
+ self.doc = (self.cursor_bucket * 64u32) | lower;
return self.doc;
}
if let Some(cursor_bucket) = self.docs.first_non_empty_bucket(self.cursor_bucket + 1) {

View File

@@ -126,7 +126,7 @@ impl VecCursor {
}
#[inline]
fn current(&self) -> Option<u32> {
- self.docs.get(self.current_pos).map(|el| *el as u32)
+ self.docs.get(self.current_pos).map(|el| *el)
}
fn get_cleared_data(&mut self) -> &mut Vec<u32> {
self.docs.clear();
@@ -268,9 +268,9 @@ impl DocSet for IpRangeDocSet {
#[inline]
fn advance(&mut self) -> DocId {
if let Some(docid) = self.loaded_docs.next() {
- docid as u32
+ docid
} else {
- if self.next_fetch_start >= self.ip_addr_fast_field.num_docs() as u32 {
+ if self.next_fetch_start >= self.ip_addr_fast_field.num_docs() {
return TERMINATED;
}
self.fetch_block();
@@ -282,7 +282,7 @@ impl DocSet for IpRangeDocSet {
fn doc(&self) -> DocId {
self.loaded_docs
.current()
- .map(|el| el as u32)
+ .map(|el| el)
.unwrap_or(TERMINATED)
}

View File

@@ -43,7 +43,7 @@ fn refill<TScorer: Scorer, TScoreCombiner: ScoreCombiner>(
min_doc: DocId,
) {
unordered_drain_filter(scorers, |scorer| {
- let horizon = min_doc + HORIZON as u32;
+ let horizon = min_doc + HORIZON;
loop {
let doc = scorer.doc();
if doc >= horizon {

View File

@@ -236,7 +236,7 @@ mod tests {
)
.unwrap();
- let date_options_json = serde_json::to_value(&date_options).unwrap();
+ let date_options_json = serde_json::to_value(date_options).unwrap();
assert_eq!(
date_options_json,
serde_json::json!({

View File

@@ -193,8 +193,8 @@ mod tests {
(0..max_len)
.prop_flat_map(move |len: usize| {
(
- proptest::collection::vec(1usize..20, len as usize).prop_map(integrate_delta),
- proptest::collection::vec(1usize..26, len as usize).prop_map(integrate_delta),
+ proptest::collection::vec(1usize..20, len).prop_map(integrate_delta),
+ proptest::collection::vec(1usize..26, len).prop_map(integrate_delta),
)
.prop_map(|(docs, offsets)| {
(0..docs.len() - 1)

View File

@@ -66,11 +66,7 @@ impl BlockCache {
#[cfg(test)]
fn peek_lru(&self) -> Option<usize> {
- self.cache
- .lock()
- .unwrap()
- .peek_lru()
- .map(|(&k, _)| k as usize)
+ self.cache.lock().unwrap().peek_lru().map(|(&k, _)| k)
}
}

View File

@@ -142,7 +142,7 @@ impl BlockCompressorImpl {
}
fn close(mut self) -> io::Result<()> {
- let header_offset: u64 = self.writer.written_bytes() as u64;
+ let header_offset: u64 = self.writer.written_bytes();
let docstore_footer =
DocStoreFooter::new(header_offset, Decompressor::from(self.compressor));
self.offset_index_writer.serialize_into(&mut self.writer)?;

View File

@@ -69,7 +69,7 @@ impl TermInfoBlockMeta {
let posting_end_addr = posting_start_addr + num_bits;
let positions_start_addr = posting_start_addr + self.postings_offset_nbits as usize;
// the position_end is the positions_start of the next term info.
- let positions_end_addr = positions_start_addr + num_bits as usize;
+ let positions_end_addr = positions_start_addr + num_bits;
let doc_freq_addr = positions_start_addr + self.positions_offset_nbits as usize;

View File

@@ -80,7 +80,7 @@ where W: Write
self.term_info_store_writer
.serialize(&mut counting_writer)?;
let footer_size = counting_writer.written_bytes();
- (footer_size as u64).serialize(&mut counting_writer)?;
+ footer_size.serialize(&mut counting_writer)?;
}
Ok(file)
}

View File

@@ -133,7 +133,7 @@ mod tests {
super::find_shorter_str_in_between(&mut left_buf, right);
assert!(left_buf.len() <= left.len());
assert!(left <= &left_buf);
- assert!(&left_buf[..] < &right);
+ assert!(&left_buf[..] < right);
}
#[test]

View File

@@ -85,7 +85,7 @@ impl ValueReader for U64MonotonicReader {
self.vals.clear();
let mut prev_val = 0u64;
for _ in 0..len {
- let delta = reader.deserialize_u64() as u64;
+ let delta = reader.deserialize_u64();
let val = prev_val + delta;
self.vals.push(val);
prev_val = val;

View File

@@ -62,7 +62,7 @@ fn len_to_capacity(len: u32) -> CapacityResult {
pub struct ExpUnrolledLinkedList {
len: u32,
tail: Addr,
- inlined_data: [u8; INLINED_BLOCK_LEN as usize],
+ inlined_data: [u8; INLINED_BLOCK_LEN],
}
pub struct ExpUnrolledLinkedListWriter<'a> {
@@ -125,7 +125,7 @@ impl Default for ExpUnrolledLinkedList {
ExpUnrolledLinkedList {
len: 0u32,
tail: Addr::null_pointer(),
- inlined_data: [0u8; INLINED_BLOCK_LEN as usize],
+ inlined_data: [0u8; INLINED_BLOCK_LEN],
}
}
}