chore: update toolchain to 20231219 (#2932)

* update toolchain file, remove unused feature gates

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix clippy

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix format

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update action file

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* update to 12-19

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

---------

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Author: Ruihang Xia
Date: 2023-12-19 15:24:08 +08:00 (committed by GitHub)
Parent: 3e6a564f8e
Commit: 6b8dbcfb54
38 changed files with 82 additions and 66 deletions

View File

@@ -13,7 +13,7 @@ on:
name: Build API docs
env:
-RUST_TOOLCHAIN: nightly-2023-10-21
+RUST_TOOLCHAIN: nightly-2023-12-19
jobs:
apidoc:

View File

@@ -29,7 +29,7 @@ concurrency:
cancel-in-progress: true
env:
-RUST_TOOLCHAIN: nightly-2023-10-21
+RUST_TOOLCHAIN: nightly-2023-12-19
jobs:
typos:

View File

@@ -12,7 +12,7 @@ concurrency:
cancel-in-progress: true
env:
-RUST_TOOLCHAIN: nightly-2023-10-21
+RUST_TOOLCHAIN: nightly-2023-12-19
jobs:
sqlness:

View File

@@ -82,7 +82,7 @@ on:
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
-RUST_TOOLCHAIN: nightly-2023-10-21
+RUST_TOOLCHAIN: nightly-2023-12-19
CARGO_PROFILE: nightly
# Controls whether to run tests, include unit-test, integration-test and sqlness.

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2023-10-21"
channel = "nightly-2023-12-19"

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(trait_upcasting)]
#![feature(assert_matches)]
#![feature(try_blocks)]
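
Note: the library-root hunks like the one above are the "remove unused feature gates" part of the commit. Crate-level `#![feature(...)]` attributes are dropped once nothing in the crate needs them any more (or once the feature has been stabilized on the newer nightly, in which case rustc's `stable_features` lint warns). A minimal, hypothetical sketch of such a crate root, not code from this repo:

    // lib.rs (hypothetical): feature gates live at the crate root. A gate for a
    // feature the crate no longer uses (or that has since been stabilized) can
    // simply be deleted; this one is kept because the test below relies on it.
    #![feature(assert_matches)]

    pub fn parse(input: &str) -> Option<u32> {
        input.parse().ok()
    }

    #[cfg(test)]
    mod tests {
        use std::assert_matches::assert_matches;

        #[test]
        fn parses_digits() {
            assert_matches!(super::parse("42"), Some(42));
        }
    }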

View File

@@ -549,7 +549,7 @@ mod tests {
let batch_get: BatchGet = req.try_into().unwrap();
let keys = batch_get.keys;
assert_eq!(b"k1".to_vec(), keys.get(0).unwrap().clone());
assert_eq!(b"k1".to_vec(), keys.first().unwrap().clone());
assert_eq!(b"k2".to_vec(), keys.get(1).unwrap().clone());
assert_eq!(b"k3".to_vec(), keys.get(2).unwrap().clone());
}
@@ -566,7 +566,7 @@ mod tests {
let batch_put: BatchPut = req.try_into().unwrap();
-let kv = batch_put.kvs.get(0).unwrap();
+let kv = batch_put.kvs.first().unwrap();
assert_eq!(b"test_key", kv.key());
assert_eq!(b"test_value", kv.value());
let _ = batch_put.options.unwrap();
@@ -582,7 +582,7 @@ mod tests {
let batch_delete: BatchDelete = req.try_into().unwrap();
assert_eq!(batch_delete.keys.len(), 3);
assert_eq!(b"k1".to_vec(), batch_delete.keys.get(0).unwrap().clone());
assert_eq!(b"k1".to_vec(), batch_delete.keys.first().unwrap().clone());
assert_eq!(b"k2".to_vec(), batch_delete.keys.get(1).unwrap().clone());
assert_eq!(b"k3".to_vec(), batch_delete.keys.get(2).unwrap().clone());
let _ = batch_delete.options.unwrap();
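
Note: most of the "fix clippy" changes in this commit are the same mechanical rewrite: clippy's `get_first` lint prefers `slice.first()` over `slice.get(0)`. A standalone sketch of the before/after, using a made-up key list:

    fn main() {
        let keys: Vec<Vec<u8>> = vec![b"k1".to_vec(), b"k2".to_vec(), b"k3".to_vec()];

        // Before (flagged by clippy::get_first on the newer toolchain):
        let _old_style = keys.get(0).unwrap();

        // After: identical behavior, clearer intent; indexes past 0 keep using get().
        assert_eq!(b"k1".to_vec(), keys.first().unwrap().clone());
        assert_eq!(b"k2".to_vec(), keys.get(1).unwrap().clone());
    }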

View File

@@ -916,7 +916,7 @@ mod tests {
let into_req: PbBatchGetRequest = req.into();
assert!(into_req.header.is_none());
assert_eq!(b"test_key1".as_slice(), into_req.keys.get(0).unwrap());
assert_eq!(b"test_key1".as_slice(), into_req.keys.first().unwrap());
assert_eq!(b"test_key2".as_slice(), into_req.keys.get(1).unwrap());
assert_eq!(b"test_key3".as_slice(), into_req.keys.get(2).unwrap());
}
@@ -946,10 +946,10 @@ mod tests {
let into_req: PbBatchPutRequest = req.into();
assert!(into_req.header.is_none());
assert_eq!(b"test_key1".to_vec(), into_req.kvs.get(0).unwrap().key);
assert_eq!(b"test_key1".to_vec(), into_req.kvs.first().unwrap().key);
assert_eq!(b"test_key2".to_vec(), into_req.kvs.get(1).unwrap().key);
assert_eq!(b"test_key3".to_vec(), into_req.kvs.get(2).unwrap().key);
assert_eq!(b"test_value1".to_vec(), into_req.kvs.get(0).unwrap().value);
assert_eq!(b"test_value1".to_vec(), into_req.kvs.first().unwrap().value);
assert_eq!(b"test_value2".to_vec(), into_req.kvs.get(1).unwrap().value);
assert_eq!(b"test_value3".to_vec(), into_req.kvs.get(2).unwrap().value);
assert!(into_req.prev_kv);
@@ -981,7 +981,7 @@ mod tests {
let into_req: PbBatchDeleteRequest = req.into();
assert!(into_req.header.is_none());
assert_eq!(&b"test_key1".to_vec(), into_req.keys.get(0).unwrap());
assert_eq!(&b"test_key1".to_vec(), into_req.keys.first().unwrap());
assert_eq!(&b"test_key2".to_vec(), into_req.keys.get(1).unwrap());
assert_eq!(&b"test_key3".to_vec(), into_req.keys.get(2).unwrap());
assert!(into_req.prev_kv);

View File

@@ -13,7 +13,6 @@
// limitations under the License.
#![feature(let_chains)]
#![feature(trait_upcasting)]
mod df_substrait;
pub mod error;

View File

@@ -13,7 +13,6 @@
// limitations under the License.
#![feature(assert_matches)]
#![feature(trait_upcasting)]
pub mod alive_keeper;
pub mod config;

View File

@@ -13,7 +13,6 @@
// limitations under the License.
pub use crate::data_type::{ConcreteDataType, DataType, DataTypeRef};
pub use crate::macros::*;
pub use crate::scalars::{Scalar, ScalarRef, ScalarVector, ScalarVectorBuilder};
pub use crate::type_id::LogicalTypeId;
pub use crate::types::{LogicalPrimitiveType, WrapperType};

View File

@@ -144,7 +144,9 @@ impl Schema {
let mut column_schemas = Vec::with_capacity(indices.len());
let mut timestamp_index = None;
for index in indices {
-if let Some(ts_index) = self.timestamp_index && ts_index == *index {
+if let Some(ts_index) = self.timestamp_index
+    && ts_index == *index
+{
timestamp_index = Some(column_schemas.len());
}
column_schemas.push(self.column_schemas[*index].clone());
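
Note: the "fix format" changes, like the `Schema` hunk above, come from rustfmt on the newer nightly: a `let`-chain condition is now broken so that each `&&` clause and the opening `{` get their own line. A minimal sketch, assuming a nightly toolchain with `let_chains` enabled (hypothetical function, not repo code):

    #![feature(let_chains)]

    fn is_timestamp_column(timestamp_index: Option<usize>, index: usize) -> bool {
        // Old layout: `if let Some(ts_index) = timestamp_index && ts_index == index {`
        // New rustfmt layout: condition split across lines, brace on its own line.
        if let Some(ts_index) = timestamp_index
            && ts_index == index
        {
            return true;
        }
        false
    }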

View File

@@ -13,7 +13,6 @@
// limitations under the License.
#![feature(assert_matches)]
#![feature(result_option_inspect)]
pub mod config;
pub mod engine;

View File

@@ -13,7 +13,6 @@
// limitations under the License.
#![feature(assert_matches)]
#![feature(trait_upcasting)]
pub mod error;
pub mod frontend;

View File

@@ -308,7 +308,7 @@ mod tests {
let _ = kv_map.get(&stat_key).unwrap();
let stat_val = kv_map.get(&stat_key).unwrap();
-let stat = stat_val.stats.get(0).unwrap();
+let stat = stat_val.stats.first().unwrap();
assert_eq!(0, stat.cluster_id);
assert_eq!(100, stat.id);

View File

@@ -190,7 +190,7 @@ mod tests {
cluster_id: 3,
node_id: 101,
};
let key: Vec<u8> = key.try_into().unwrap();
let key: Vec<u8> = key.into();
let res = ctx.in_memory.get(&key).await.unwrap();
let kv = res.unwrap();
let key: StatKey = kv.key.clone().try_into().unwrap();
@@ -203,7 +203,7 @@ mod tests {
handle_request_many_times(ctx.clone(), &handler, 10).await;
let key: Vec<u8> = key.try_into().unwrap();
let key: Vec<u8> = key.into();
let res = ctx.in_memory.get(&key).await.unwrap();
let kv = res.unwrap();
let val: StatValue = kv.value.try_into().unwrap();
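
Note: the `try_into().unwrap()` to `.into()` rewrites in these meta-srv tests are the shape flagged by clippy's `unnecessary_fallible_conversions` lint (new around this nightly): when a type already has an infallible `From` impl, the blanket `TryFrom` can never fail, so the fallible call plus `unwrap()` is noise. A sketch with a hypothetical key type standing in for `StatKey`:

    // Hypothetical stand-in; the real StatKey lives in meta-srv.
    struct Key {
        cluster_id: u64,
        node_id: u64,
    }

    impl From<Key> for Vec<u8> {
        fn from(key: Key) -> Self {
            format!("{}-{}", key.cluster_id, key.node_id).into_bytes()
        }
    }

    fn main() {
        let key = Key { cluster_id: 3, node_id: 101 };
        // Before: `let bytes: Vec<u8> = key.try_into().unwrap();`
        // After: the direct, infallible conversion.
        let bytes: Vec<u8> = key.into();
        assert_eq!(bytes, b"3-101");
    }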

View File

@@ -314,7 +314,7 @@ mod tests {
node_id: 1,
};
let key_bytes: Vec<u8> = key.try_into().unwrap();
let key_bytes: Vec<u8> = key.into();
let new_key: StatKey = key_bytes.try_into().unwrap();
assert_eq!(0, new_key.cluster_id);
@@ -338,7 +338,7 @@ mod tests {
assert_eq!(1, stats.len());
-let stat = stats.get(0).unwrap();
+let stat = stats.first().unwrap();
assert_eq!(0, stat.cluster_id);
assert_eq!(101, stat.id);
assert_eq!(100, stat.region_num);
@@ -452,7 +452,7 @@ mod tests {
region_id: 2,
};
let key_bytes: Vec<u8> = key.try_into().unwrap();
let key_bytes: Vec<u8> = key.into();
let new_key: InactiveRegionKey = key_bytes.try_into().unwrap();
assert_eq!(new_key, key);

View File

@@ -128,13 +128,13 @@ mod tests {
let mut stat_vals = vec![stat_value1, stat_value2];
stat_vals = filter_by_addr(stat_vals, "127.0.0.1:3002");
assert_eq!(stat_vals.len(), 1);
-assert_eq!(stat_vals.get(0).unwrap().stats.len(), 3);
+assert_eq!(stat_vals.first().unwrap().stats.len(), 3);
assert_eq!(
stat_vals
-.get(0)
+.first()
.unwrap()
.stats
-.get(0)
+.first()
.unwrap()
.timestamp_millis,
3

View File

@@ -83,7 +83,9 @@ impl TwcsPicker {
) -> Vec<CompactionOutput> {
let mut output = vec![];
for (window, files) in time_windows {
-if let Some(active_window) = active_window && *window == active_window {
+if let Some(active_window) = active_window
+    && *window == active_window
+{
if files.len() > self.max_active_window_files {
output.push(CompactionOutput {
output_file_id: FileId::random(),
@@ -102,7 +104,11 @@ impl TwcsPicker {
inputs: files.clone(),
});
} else {
debug!("No enough files, current: {}, max_inactive_window_files: {}", files.len(), self.max_inactive_window_files)
debug!(
"No enough files, current: {}, max_inactive_window_files: {}",
files.len(),
self.max_inactive_window_files
)
}
}
}
@@ -207,7 +213,9 @@ fn find_latest_window_in_seconds<'a>(
let mut latest_timestamp = None;
for f in files {
let (_, end) = f.time_range();
-if let Some(latest) = latest_timestamp && end > latest {
+if let Some(latest) = latest_timestamp
+    && end > latest
+{
latest_timestamp = Some(end);
} else {
latest_timestamp = Some(end);
@@ -542,11 +550,17 @@ mod tests {
.iter(),
3,
);
-assert_eq!(files[0], windows.get(&0).unwrap().get(0).unwrap().file_id());
-assert_eq!(files[1], windows.get(&3).unwrap().get(0).unwrap().file_id());
+assert_eq!(
+    files[0],
+    windows.get(&0).unwrap().first().unwrap().file_id()
+);
+assert_eq!(
+    files[1],
+    windows.get(&3).unwrap().first().unwrap().file_id()
+);
assert_eq!(
files[2],
windows.get(&12).unwrap().get(0).unwrap().file_id()
windows.get(&12).unwrap().first().unwrap().file_id()
);
}

View File

@@ -1041,7 +1041,7 @@ mod tests {
v0.extend(
values
.fields
-.get(0)
+.first()
.unwrap()
.as_any()
.downcast_ref::<Int64Vector>()
@@ -1125,7 +1125,7 @@ mod tests {
assert_eq!(1, batch.fields().len());
let v0 = batch
.fields()
-.get(0)
+.first()
.unwrap()
.data
.as_any()

View File

@@ -119,7 +119,7 @@ impl WriteRequest {
let row_size = self
.rows
.rows
-.get(0)
+.first()
.map(|row| row.encoded_len())
.unwrap_or(0);
row_size * self.rows.rows.len()

View File

@@ -74,7 +74,7 @@ async fn test_object_list(store: &ObjectStore) -> Result<()> {
// Only o2 is exists
let entries = store.list("/").await?;
assert_eq!(1, entries.len());
-assert_eq!(p2, entries.get(0).unwrap().path());
+assert_eq!(p2, entries.first().unwrap().path());
let content = store.read(p2).await?;
assert_eq!("Hello, object2!", String::from_utf8(content)?);

View File

@@ -202,7 +202,7 @@ mod tests {
}
fn find_region(&self, values: &[Value]) -> Result<RegionNumber> {
-let val = values.get(0).unwrap().clone();
+let val = values.first().unwrap().clone();
let val = match val {
Value::String(v) => v.as_utf8().to_string(),
_ => unreachable!(),
@@ -229,7 +229,7 @@ mod tests {
}
fn find_region(&self, values: &[Value]) -> Result<RegionNumber> {
-let val = values.get(0).unwrap().clone();
+let val = values.first().unwrap().clone();
let val = match val {
Value::Null => 1,
_ => 0,

View File

@@ -624,7 +624,9 @@ impl HistogramFoldStream {
))
})?;
for (i, v) in le_as_f64_array.iter().enumerate() {
-if let Some(v) = v && v == f64::INFINITY {
+if let Some(v) = v
+    && v == f64::INFINITY
+{
return Ok(i);
}
}

View File

@@ -360,7 +360,9 @@ impl InstantManipulateStream {
let curr = ts_column.value(cursor);
match curr.cmp(&expected_ts) {
Ordering::Equal => {
if let Some(field_column) = &field_column && field_column.value(cursor).is_nan() {
if let Some(field_column) = &field_column
&& field_column.value(cursor).is_nan()
{
// ignore the NaN value
} else {
take_indices.push(cursor as u64);
@@ -393,7 +395,8 @@ impl InstantManipulateStream {
if prev_ts + self.lookback_delta >= expected_ts {
// only use the point in the time range
if let Some(field_column) = &field_column
&& field_column.value(prev_cursor).is_nan() {
&& field_column.value(prev_cursor).is_nan()
{
// if the newest value is NaN, it means the value is stale, so we should not use it
continue;
}
@@ -402,7 +405,9 @@ impl InstantManipulateStream {
aligned_ts.push(expected_ts);
}
}
-} else if let Some(field_column) = &field_column && field_column.value(cursor).is_nan() {
+} else if let Some(field_column) = &field_column
+    && field_column.value(cursor).is_nan()
+{
// if the newest value is NaN, it means the value is stale, so we should not use it
} else {
// use this point

View File

@@ -419,7 +419,7 @@ impl PromPlanner {
.time_index_column
.clone()
.expect("time index should be set in `setup_context`"),
-self.ctx.field_columns.get(0).cloned(),
+self.ctx.field_columns.first().cloned(),
normalize,
);
LogicalPlan::Extension(Extension {

View File

@@ -158,17 +158,19 @@ impl PlanRewriter {
}
Commutativity::ConditionalCommutative(transformer) => {
if let Some(transformer) = transformer
&& let Some(plan) = transformer(plan) {
&& let Some(plan) = transformer(plan)
{
self.stage.push(plan)
}
-},
+}
Commutativity::TransformedCommutative(transformer) => {
if let Some(transformer) = transformer
&& let Some(plan) = transformer(plan) {
&& let Some(plan) = transformer(plan)
{
self.stage.push(plan)
}
-},
-| Commutativity::NonCommutative
+}
+Commutativity::NonCommutative
| Commutativity::Unimplemented
| Commutativity::Unsupported => {
return true;

View File

@@ -114,7 +114,7 @@ impl Function for PyUDF {
_input_types: &[datatypes::prelude::ConcreteDataType],
) -> common_query::error::Result<datatypes::prelude::ConcreteDataType> {
// TODO(discord9): use correct return annotation if exist
-match self.copr.return_types.get(0) {
+match self.copr.return_types.first() {
Some(Some(AnnotationInfo {
datatype: Some(ty), ..
})) => Ok(ty.clone()),

View File

@@ -272,7 +272,7 @@ fn parse_keywords(keywords: &Vec<ast::Keyword<()>>) -> Result<DecoratorArgs> {
"Expect between {len_min} and {len_max} keyword argument, found {}.",
keywords.len()
),
-loc: keywords.get(0).map(|s| s.location)
+loc: keywords.first().map(|s| s.location)
}
);
let mut ret_args = DecoratorArgs::default();

View File

@@ -55,11 +55,7 @@ impl From<DataPointRequest> for DataPoint {
fn from(request: DataPointRequest) -> Self {
let ts_millis = DataPoint::timestamp_to_millis(request.timestamp);
-let tags = request
-    .tags
-    .into_iter()
-    .map(|(k, v)| (k, v))
-    .collect::<Vec<(String, String)>>();
+let tags = request.tags.into_iter().collect::<Vec<(String, String)>>();
DataPoint::new(request.metric, ts_millis, request.value, tags)
}
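
Note: dropping `.map(|(k, v)| (k, v))` above is the kind of no-op that clippy's `map_identity` lint targets: the closure rebuilds exactly the tuple the iterator already yields, so the call does nothing. A small sketch assuming tags arrive as a `HashMap<String, String>`:

    use std::collections::HashMap;

    fn main() {
        let mut tags = HashMap::new();
        tags.insert("host".to_string(), "web-1".to_string());

        // Before (flagged): tags.into_iter().map(|(k, v)| (k, v)).collect::<Vec<_>>()
        // After: collect directly; IntoIterator for HashMap already yields (K, V) pairs.
        let tags: Vec<(String, String)> = tags.into_iter().collect();
        assert_eq!(tags, vec![("host".to_string(), "web-1".to_string())]);
    }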

View File

@@ -211,7 +211,7 @@ mod tests {
.unwrap()
{
Statement::CreateTable(CreateTable { columns, .. }) => {
-let ts_col = columns.get(0).unwrap();
+let ts_col = columns.first().unwrap();
assert_eq!(
expected_type,
sql_data_type_to_concrete_data_type(&ts_col.data_type).unwrap()

View File

@@ -33,7 +33,9 @@ impl<'a> ParserContext<'a> {
pub(crate) fn parse_copy(&mut self) -> Result<Statement> {
let _ = self.parser.next_token();
let next = self.parser.peek_token();
-let copy = if let Word(word) = next.token && word.keyword == Keyword::DATABASE {
+let copy = if let Word(word) = next.token
+    && word.keyword == Keyword::DATABASE
+{
let _ = self.parser.next_token();
let copy_database = self.parser_copy_database()?;
crate::statements::copy::Copy::CopyDatabase(copy_database)

View File

@@ -783,7 +783,7 @@ fn ensure_value_lists_strictly_increased<'a>(
/// Ensure that value list's length matches the column list.
fn ensure_value_list_len_matches_columns(
partitions: &Partitions,
partition_columns: &Vec<&ColumnDef>,
partition_columns: &[&ColumnDef],
) -> Result<()> {
for entry in partitions.entries.iter() {
ensure!(
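
Note: changing `&Vec<&ColumnDef>` to `&[&ColumnDef]` here (and `&Vec<Vec<Expr>>` to `&[Vec<Expr>]` in the next file) is clippy's `ptr_arg` lint: a `&Vec<T>` parameter forces callers to hold a `Vec`, while `&[T]` accepts any slice and existing `&Vec<T>` arguments still coerce. A self-contained sketch with a hypothetical helper:

    // Hypothetical helper, not the repo's ensure_value_list_len_matches_columns.
    fn total_len(columns: &[&str]) -> usize {
        columns.iter().map(|c| c.len()).sum()
    }

    fn main() {
        let owned: Vec<&str> = vec!["ts", "host"];
        // A &Vec<&str> deref-coerces to &[&str], so call sites don't change.
        assert_eq!(total_len(&owned), 6);
    }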

View File

@@ -107,7 +107,7 @@ impl Insert {
}
}
fn sql_exprs_to_values(exprs: &Vec<Vec<Expr>>) -> Result<Vec<Vec<Value>>> {
fn sql_exprs_to_values(exprs: &[Vec<Expr>]) -> Result<Vec<Vec<Value>>> {
let mut values = Vec::with_capacity(exprs.len());
for es in exprs.iter() {
let mut vs = Vec::with_capacity(es.len());

View File

@@ -433,7 +433,6 @@ mod tests {
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use datafusion::parquet::arrow::ArrowWriter;
pub use datafusion::parquet::schema::types::BasicTypeInfo;
use datafusion_common::{Column, ScalarValue};
use datafusion_expr::{col, lit, BinaryExpr, Literal, Operator};
use datatypes::arrow::array::Int32Array;

View File

@@ -61,7 +61,7 @@ mod test {
ctx.clone(),
)
.await
-.get(0)
+.first()
.unwrap()
.is_ok());

View File

@@ -63,7 +63,7 @@ mod tests {
ctx.clone(),
)
.await
-.get(0)
+.first()
.unwrap()
.is_ok());

View File

@@ -240,7 +240,7 @@ CREATE TABLE my_table (
PARTITION r3 VALUES LESS THAN (MAXVALUE),
)";
let result = cluster.frontend.do_query(sql, QueryContext::arc()).await;
-result.get(0).unwrap().as_ref().unwrap();
+result.first().unwrap().as_ref().unwrap();
let table = cluster
.frontend