chore: upgrade rust toolchain to latest nightly (#2049)

* chore: upgrade rust toolchain to latest nightly

* rebase develop

* update rust toolchain in ci
LFC authored on 2023-08-08 15:17:51 +08:00, committed by GitHub
parent 7d0d8dc6e3
commit 46fa3eb629
127 changed files with 551 additions and 339 deletions

View File

@@ -346,7 +346,9 @@ impl ChunkReaderBuilder {
     /// Build time range predicate from schema and filters.
     fn build_time_range_predicate(&self) -> TimestampRange {
-        let Some(ts_col) = self.schema.user_schema().timestamp_column() else { return TimestampRange::min_to_max() };
+        let Some(ts_col) = self.schema.user_schema().timestamp_column() else {
+            return TimestampRange::min_to_max();
+        };
         let unit = ts_col
             .data_type
             .as_timestamp()
@@ -361,7 +363,9 @@ impl ChunkReaderBuilder {
             return true;
         }
         // end_timestamp of sst file is inclusive.
-        let Some((start, end)) = *file.time_range() else { return true; };
+        let Some((start, end)) = *file.time_range() else {
+            return true;
+        };
         let file_ts_range = TimestampRange::new_inclusive(Some(start), Some(end));
         file_ts_range.intersects(predicate)
     }
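
Most of the hunks in this commit look like mechanical rustfmt output: the upgraded nightly's rustfmt gained support for formatting let-else statements (it previously left them untouched), and any let-else longer than the default single_line_let_else_max_width of 50 characters gets expanded into the multi-line form. A minimal sketch of the rule, using a hypothetical function rather than code from this commit:

    fn first_even(nums: &[i32]) -> i32 {
        // Within the 50-character single-line limit, so rustfmt keeps
        // the whole let-else on one line.
        let Some(x) = nums.first() else { return 0 };
        // Too long for one line, so rustfmt expands the divergent block,
        // leaving `else {` on the same line as the initializer.
        let Some(even) = nums.iter().copied().find(|n| n % 2 == 0) else {
            return *x;
        };
        even
    }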

View File

@@ -46,7 +46,9 @@ pub(crate) fn get_expired_ssts(
     ttl: Option<Duration>,
     now: Timestamp,
 ) -> Result<Vec<FileHandle>> {
-    let Some(ttl) = ttl else { return Ok(vec![]); };
+    let Some(ttl) = ttl else {
+        return Ok(vec![]);
+    };
     let expire_time = now.sub_duration(ttl).context(TtlCalculationSnafu)?;
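
For context, a minimal sketch of the TTL filter this function implements, using std types only (the real code uses the store's Timestamp and FileHandle): with no TTL configured nothing expires; otherwise anything that ended before now - ttl is expired.

    use std::time::{Duration, SystemTime};

    fn is_expired(file_end: SystemTime, ttl: Option<Duration>, now: SystemTime) -> bool {
        let Some(ttl) = ttl else {
            return false; // no TTL configured: nothing expires
        };
        match now.checked_sub(ttl) {
            // expired if the file's time range ended before the cutoff
            Some(expire_time) => file_end < expire_time,
            None => false, // cutoff underflowed; treat as not expired
        }
    }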

View File

@@ -131,8 +131,8 @@ impl<S: LogStore> CompactionTaskImpl<S> {
         let edit = RegionEdit {
             region_version,
             flushed_sequence: None,
-            files_to_add: Vec::from_iter(output.into_iter()),
-            files_to_remove: Vec::from_iter(input.into_iter()),
+            files_to_add: Vec::from_iter(output),
+            files_to_remove: Vec::from_iter(input),
             compaction_time_window: self.compaction_time_window,
         };
         debug!(
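
This hunk is a lint fix rather than a formatting one: Vec::from_iter is bounded on IntoIterator, so the collections can be passed directly and the explicit .into_iter() is a no-op, which the upgraded toolchain's clippy flags (most likely via useless_conversion). A sketch with a hypothetical collection:

    use std::collections::BTreeSet;

    fn collect_ids(ids: BTreeSet<u64>) -> Vec<u64> {
        // Before: Vec::from_iter(ids.into_iter()); the conversion is
        // redundant because from_iter already accepts any IntoIterator.
        Vec::from_iter(ids)
    }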

View File

@@ -162,7 +162,10 @@ impl Memtable for BTreeMemtable {
         let Some(timestamp_type) = ts_meta.desc.data_type.as_timestamp() else {
             // safety: timestamp column always has timestamp type, otherwise it's a bug.
-            panic!("Timestamp column is not a valid timestamp type: {:?}", self.schema);
+            panic!(
+                "Timestamp column is not a valid timestamp type: {:?}",
+                self.schema
+            );
         };
         MemtableStats {
@@ -472,7 +475,9 @@ impl InnerKey {
     #[inline]
     fn is_in_time_range(&self, range: &Option<TimestampRange>) -> bool {
-        let Some(range) = range else { return true; };
+        let Some(range) = range else {
+            return true;
+        };
         range.contains(
             &self
                 .timestamp()

View File

@@ -63,7 +63,9 @@
 {
     async fn next_batch(&mut self) -> Result<Option<Batch>> {
         let _window_scan_elapsed = timer!(crate::metrics::WINDOW_SCAN_ELAPSED);
-        let Some(mut reader) = self.readers.pop() else { return Ok(None); };
+        let Some(mut reader) = self.readers.pop() else {
+            return Ok(None);
+        };
         let store_schema = self.schema.schema_to_read();
         let mut batches = vec![];
@@ -78,11 +80,13 @@
         }
         let Some(num_columns) = batches.get(0).map(|b| b.len()) else {
             // the reader does not yield data, a batch of empty vectors must be returned instead of
             // an empty batch without any column.
-            let empty_columns = store_schema.columns().iter().map(|s| {
-                s.desc.data_type.create_mutable_vector(0).to_vector()
-            }).collect();
+            let empty_columns = store_schema
+                .columns()
+                .iter()
+                .map(|s| s.desc.data_type.create_mutable_vector(0).to_vector())
+                .collect();
             return Ok(Some(Batch::new(empty_columns)));
         };
         let mut vectors_in_batch = Vec::with_capacity(num_columns);
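
The restructured iterator chain is behavior-preserving; the invariant the comment describes is that a reader yielding no rows must still produce one zero-length vector per schema column, so downstream code can rely on the batch's arity. A toy illustration, with plain Vecs standing in for the store's vector types:

    // Each column is present but holds zero rows.
    fn empty_batch(column_names: &[&str]) -> Vec<(String, Vec<i64>)> {
        column_names
            .iter()
            .map(|name| (name.to_string(), Vec::new()))
            .collect()
    }

    fn main() {
        let batch = empty_batch(&["ts", "value"]);
        assert_eq!(batch.len(), 2); // two columns, zero rows each
    }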

View File

@@ -542,7 +542,7 @@ async fn create_store_config(region_name: &str, root: &str) -> StoreConfig<NoopLogStore> {
         .unwrap(),
     );
-    let log_store = Arc::new(NoopLogStore::default());
+    let log_store = Arc::new(NoopLogStore);
     let file_purger = Arc::new(LocalScheduler::new(
         SchedulerConfig::default(),
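
Another lint fix: assuming NoopLogStore is a fieldless unit struct (which the change implies), NoopLogStore::default() and the literal NoopLogStore are the same value, and recent clippy flags the former (clippy::default_constructed_unit_structs). A sketch with a hypothetical unit struct:

    #[derive(Default)]
    struct NoopStore;

    fn main() {
        let _flagged = NoopStore::default(); // clippy: needless default()
        let _preferred = NoopStore; // the literal is the value
    }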

View File

@@ -268,7 +268,13 @@ impl CompactionTester {
             MockFilePurgeHandler::default(),
         ));
-        let Some(region) = RegionImpl::open(REGION_NAME.to_string(), store_config, &OpenOptions::default()).await? else {
+        let Some(region) = RegionImpl::open(
+            REGION_NAME.to_string(),
+            store_config,
+            &OpenOptions::default(),
+        )
+        .await?
+        else {
             return Ok(false);
         };
         self.base = Some(FileTesterBase::with_region(region));

View File

@@ -208,7 +208,9 @@ impl LevelMeta {
         self.files
             .iter()
             .filter_map(|(_, v)| {
-                let Some((_, end)) = v.time_range() else { return None; };
+                let Some((_, end)) = v.time_range() else {
+                    return None;
+                };
                 if end < expire_time {
                     Some(v.clone())
                 } else {

View File

@@ -145,7 +145,10 @@ fn decode_timestamp_range(
     file_meta: &FileMetaData,
     schema: &datatypes::schema::SchemaRef,
 ) -> Result<Option<(Timestamp, Timestamp)>> {
-    let (Some(ts_col_idx), Some(ts_col)) = (schema.timestamp_index(), schema.timestamp_column()) else { return Ok(None); };
+    let (Some(ts_col_idx), Some(ts_col)) = (schema.timestamp_index(), schema.timestamp_column())
+    else {
+        return Ok(None);
+    };
     let ts_datatype = &ts_col.data_type;
     decode_timestamp_range_inner(file_meta, ts_col_idx, ts_datatype)
 }
@@ -176,9 +179,16 @@ fn decode_timestamp_range_inner(
         .context(DecodeParquetTimeRangeSnafu {
             msg: format!("Cannot find ts column by index: {ts_index}"),
         })?
-        .meta_data else { return Ok(None) };
-    let Some(stats) = &metadata.statistics else { return Ok(None) };
-    let (Some(min_value), Some(max_value)) = (&stats.min_value, &stats.max_value) else { return Ok(None); };
+        .meta_data
+    else {
+        return Ok(None);
+    };
+    let Some(stats) = &metadata.statistics else {
+        return Ok(None);
+    };
+    let (Some(min_value), Some(max_value)) = (&stats.min_value, &stats.max_value) else {
+        return Ok(None);
+    };
     // according to [parquet's spec](https://parquet.apache.org/docs/file-format/data-pages/encodings/), min/max value in stats uses plain encoding with little endian.
     // also see https://github.com/apache/arrow-rs/blob/5fb337db04a1a19f7d40da46f19b7b5fd4051593/parquet/src/file/statistics.rs#L172
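
The RegionImpl::open and parquet hunks above show the other half of the new let-else rule: when appending ` else {` to the let line would overflow max_width, or the initializer itself has to wrap, rustfmt puts `else` on a line of its own rather than dangling it after the wrapped expression. A sketch with a hypothetical function:

    fn bounds(readings: &[i64]) -> Option<(i64, i64)> {
        let (Some(lo), Some(hi)) = (readings.iter().copied().min(), readings.iter().copied().max())
        else {
            return None;
        };
        Some((lo, hi))
    }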