chore: replace result assertions (#1840)

* s/assert!\((.*)\.is_ok\(\)\);/\1.unwrap\(\);/g

* s/assert!\((.*)\.is_some\(\)\);/\1.unwrap\(\);/g
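
The two substitutions above are applied globally over the test sources. As a minimal illustration of the first rule (using std parsing as a hypothetical stand-in for the project's own types; the real hunks below follow the same shape):

```rust
fn main() {
    let s = "42";

    // Before: assert!(s.parse::<i32>().is_ok());
    // A failure here only reports "assertion failed", with no error detail.

    // After: unwrap the Result instead, so a failure surfaces the actual
    // ParseIntError in the test output.
    let _ = s.parse::<i32>().unwrap();
}
```
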
Lei, HUANG, 2023-06-27 19:14:48 +08:00, committed by GitHub
parent b737a240de, commit f287d3115b
92 changed files with 269 additions and 304 deletions


@@ -392,6 +392,6 @@ mod tests {
#[test]
fn test_table_global_value_compatibility() {
let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
assert!(TableGlobalValue::parse(s).is_ok());
let _ = TableGlobalValue::parse(s).unwrap();
}
}


@@ -344,7 +344,7 @@ mod tests {
table: Arc::new(NumbersTable::default()),
};
assert!(catalog_list.register_table(register_request).await.is_ok());
let _ = catalog_list.register_table(register_request).await.unwrap();
let table = catalog_list
.table(
DEFAULT_CATALOG_NAME,
@@ -353,7 +353,7 @@ mod tests {
)
.await
.unwrap();
assert!(table.is_some());
let _ = table.unwrap();
assert!(catalog_list
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "not_exists")
.await
@@ -393,7 +393,7 @@ mod tests {
new_table_name: new_table_name.to_string(),
table_id,
};
assert!(catalog.rename_table(rename_request).await.is_ok());
let _ = catalog.rename_table(rename_request).await.unwrap();
// test old table name not exist
assert!(!catalog
@@ -495,7 +495,7 @@ mod tests {
table_id: 2333,
table: Arc::new(NumbersTable::default()),
};
assert!(catalog.register_table(register_table_req).await.is_ok());
let _ = catalog.register_table(register_table_req).await.unwrap();
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await


@@ -677,7 +677,7 @@ mod test {
let region = 1;
assert!(keeper.find_handle(&region).await.is_none());
keeper.register_region(region).await;
assert!(keeper.find_handle(&region).await.is_some());
let _ = keeper.find_handle(&region).await.unwrap();
let ten_seconds_later = || Instant::now() + Duration::from_secs(10);
@@ -720,7 +720,7 @@ mod test {
let tx = handle.tx.clone();
// assert countdown task is running
assert!(tx.send(CountdownCommand::Start(5000)).await.is_ok());
tx.send(CountdownCommand::Start(5000)).await.unwrap();
assert!(!finished.load(Ordering::Relaxed));
drop(handle);
@@ -772,7 +772,7 @@ mod test {
};
let table_engine = Arc::new(MockTableEngine::default());
assert!(table_engine.create_table(ctx, request).await.is_ok());
let _ = table_engine.create_table(ctx, request).await.unwrap();
let table_ident = TableIdent {
catalog: catalog.to_string(),


@@ -136,14 +136,14 @@ mod tests {
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let _ = result.unwrap();
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("public"),
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let _ = result.unwrap();
let table_ref = TableReference::Partial {
schema: Cow::Borrowed("wrong_schema"),
@@ -158,7 +158,7 @@ mod tests {
table: Cow::Borrowed("table_name"),
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_ok());
let _ = result.unwrap();
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("wrong_catalog"),
@@ -172,14 +172,14 @@ mod tests {
schema: Cow::Borrowed("information_schema"),
table: Cow::Borrowed("columns"),
};
assert!(table_provider.resolve_table_ref(table_ref).is_ok());
let _ = table_provider.resolve_table_ref(table_ref).unwrap();
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("greptime"),
schema: Cow::Borrowed("information_schema"),
table: Cow::Borrowed("columns"),
};
assert!(table_provider.resolve_table_ref(table_ref).is_ok());
let _ = table_provider.resolve_table_ref(table_ref).unwrap();
let table_ref = TableReference::Full {
catalog: Cow::Borrowed("dummy"),


@@ -109,7 +109,7 @@ mod tests {
.unwrap();
let ret = backend.get(b"__c-greptime").await.unwrap();
assert!(ret.is_some());
let _ = ret.unwrap();
let _ = backend
.compare_and_set(
@@ -121,13 +121,11 @@ mod tests {
.unwrap();
let ret = backend.get(b"__c-greptime").await.unwrap();
assert!(ret.is_some());
assert_eq!(&b"123"[..], &(ret.as_ref().unwrap().1));
let _ = backend.set(b"__c-greptime", b"1234").await;
let ret = backend.get(b"__c-greptime").await.unwrap();
assert!(ret.is_some());
assert_eq!(&b"1234"[..], &(ret.as_ref().unwrap().1));
backend.delete(b"__c-greptime").await.unwrap();


@@ -339,19 +339,15 @@ mod tests {
};
let plugins = load_frontend_plugins(&command.user_provider);
assert!(plugins.is_ok());
let plugins = plugins.unwrap();
let provider = plugins.get::<UserProviderRef>();
assert!(provider.is_some());
let provider = provider.unwrap();
let provider = plugins.get::<UserProviderRef>().unwrap();
let result = provider
.authenticate(
Identity::UserId("test", None),
Password::PlainText("test".to_string().into()),
)
.await;
assert!(result.is_ok());
let _ = result.unwrap();
}
#[test]


@@ -356,18 +356,15 @@ mod tests {
};
let plugins = load_frontend_plugins(&command.user_provider);
assert!(plugins.is_ok());
let plugins = plugins.unwrap();
let provider = plugins.get::<UserProviderRef>();
assert!(provider.is_some());
let provider = provider.unwrap();
let provider = plugins.get::<UserProviderRef>().unwrap();
let result = provider
.authenticate(
Identity::UserId("test", None),
Password::PlainText("test".to_string().into()),
)
.await;
assert!(result.is_ok());
let _ = result.unwrap();
}
#[test]


@@ -27,7 +27,7 @@ mod tests {
impl Repl {
fn send_line(&mut self, line: &str) {
assert!(self.repl.send_line(line).is_ok());
let _ = self.repl.send_line(line).unwrap();
// read a line to consume the prompt
let _ = self.read_line();
@@ -105,7 +105,7 @@ mod tests {
test_select(repl);
datanode.kill().unwrap();
assert!(datanode.wait().is_ok());
let _ = datanode.wait().unwrap();
}
fn test_create_database(repl: &mut Repl) {


@@ -119,7 +119,7 @@ mod tests {
#[test]
fn [<test_read_write_ $num_ty _from_vec_buffer>]() {
let mut buf = vec![];
assert!(buf.[<write_ $num_ty _le>]($num_ty::MAX).is_ok());
let _ = buf.[<write_ $num_ty _le>]($num_ty::MAX).unwrap();
assert_eq!($num_ty::MAX, buf.as_slice().[<read_ $num_ty _le>]().unwrap());
}
}
@@ -132,7 +132,7 @@ mod tests {
#[test]
pub fn test_peek_write_from_vec_buffer() {
let mut buf: Vec<u8> = vec![];
assert!(buf.write_from_slice("hello".as_bytes()).is_ok());
buf.write_from_slice("hello".as_bytes()).unwrap();
let mut slice = buf.as_slice();
assert_eq!(104, slice.peek_u8_le().unwrap());
slice.advance_by(1);


@@ -158,19 +158,19 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut argmax = Argmax::<i32>::default();
assert!(argmax.update_batch(&[]).is_ok());
argmax.update_batch(&[]).unwrap();
assert_eq!(Value::Null, argmax.evaluate().unwrap());
// test update one not-null value
let mut argmax = Argmax::<i32>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Some(42)]))];
assert!(argmax.update_batch(&v).is_ok());
argmax.update_batch(&v).unwrap();
assert_eq!(Value::from(0_u64), argmax.evaluate().unwrap());
// test update one null value
let mut argmax = Argmax::<i32>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Option::<i32>::None]))];
assert!(argmax.update_batch(&v).is_ok());
argmax.update_batch(&v).unwrap();
assert_eq!(Value::Null, argmax.evaluate().unwrap());
// test update no null-value batch
@@ -180,7 +180,7 @@ mod test {
Some(1),
Some(3),
]))];
assert!(argmax.update_batch(&v).is_ok());
argmax.update_batch(&v).unwrap();
assert_eq!(Value::from(2_u64), argmax.evaluate().unwrap());
// test update null-value batch
@@ -190,7 +190,7 @@ mod test {
None,
Some(4),
]))];
assert!(argmax.update_batch(&v).is_ok());
argmax.update_batch(&v).unwrap();
assert_eq!(Value::from(2_u64), argmax.evaluate().unwrap());
// test update with constant vector
@@ -199,7 +199,7 @@ mod test {
Arc::new(Int32Vector::from_vec(vec![4])),
10,
))];
assert!(argmax.update_batch(&v).is_ok());
argmax.update_batch(&v).unwrap();
assert_eq!(Value::from(0_u64), argmax.evaluate().unwrap());
}
}


@@ -166,19 +166,19 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut argmin = Argmin::<i32>::default();
assert!(argmin.update_batch(&[]).is_ok());
argmin.update_batch(&[]).unwrap();
assert_eq!(Value::Null, argmin.evaluate().unwrap());
// test update one not-null value
let mut argmin = Argmin::<i32>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Some(42)]))];
assert!(argmin.update_batch(&v).is_ok());
argmin.update_batch(&v).unwrap();
assert_eq!(Value::from(0_u32), argmin.evaluate().unwrap());
// test update one null value
let mut argmin = Argmin::<i32>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Option::<i32>::None]))];
assert!(argmin.update_batch(&v).is_ok());
argmin.update_batch(&v).unwrap();
assert_eq!(Value::Null, argmin.evaluate().unwrap());
// test update no null-value batch
@@ -188,7 +188,7 @@ mod test {
Some(1),
Some(3),
]))];
assert!(argmin.update_batch(&v).is_ok());
argmin.update_batch(&v).unwrap();
assert_eq!(Value::from(0_u32), argmin.evaluate().unwrap());
// test update null-value batch
@@ -198,7 +198,7 @@ mod test {
None,
Some(4),
]))];
assert!(argmin.update_batch(&v).is_ok());
argmin.update_batch(&v).unwrap();
assert_eq!(Value::from(0_u32), argmin.evaluate().unwrap());
// test update with constant vector
@@ -207,7 +207,7 @@ mod test {
Arc::new(Int32Vector::from_vec(vec![4])),
10,
))];
assert!(argmin.update_batch(&v).is_ok());
argmin.update_batch(&v).unwrap();
assert_eq!(Value::from(0_u32), argmin.evaluate().unwrap());
}
}


@@ -192,20 +192,20 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut diff = Diff::<i32, i64>::default();
assert!(diff.update_batch(&[]).is_ok());
diff.update_batch(&[]).unwrap();
assert!(diff.values.is_empty());
assert_eq!(Value::Null, diff.evaluate().unwrap());
// test update one not-null value
let mut diff = Diff::<i32, i64>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Some(42)]))];
assert!(diff.update_batch(&v).is_ok());
diff.update_batch(&v).unwrap();
assert_eq!(Value::Null, diff.evaluate().unwrap());
// test update one null value
let mut diff = Diff::<i32, i64>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Option::<i32>::None]))];
assert!(diff.update_batch(&v).is_ok());
diff.update_batch(&v).unwrap();
assert_eq!(Value::Null, diff.evaluate().unwrap());
// test update no null-value batch
@@ -216,7 +216,7 @@ mod test {
Some(2),
]))];
let values = vec![Value::from(2_i64), Value::from(1_i64)];
assert!(diff.update_batch(&v).is_ok());
diff.update_batch(&v).unwrap();
assert_eq!(
Value::List(ListValue::new(
Some(Box::new(values)),
@@ -234,7 +234,7 @@ mod test {
Some(4),
]))];
let values = vec![Value::from(5_i64), Value::from(1_i64)];
assert!(diff.update_batch(&v).is_ok());
diff.update_batch(&v).unwrap();
assert_eq!(
Value::List(ListValue::new(
Some(Box::new(values)),
@@ -250,7 +250,7 @@ mod test {
4,
))];
let values = vec![Value::from(0_i64), Value::from(0_i64), Value::from(0_i64)];
assert!(diff.update_batch(&v).is_ok());
diff.update_batch(&v).unwrap();
assert_eq!(
Value::List(ListValue::new(
Some(Box::new(values)),


@@ -188,19 +188,19 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut mean = Mean::<i32>::default();
assert!(mean.update_batch(&[]).is_ok());
mean.update_batch(&[]).unwrap();
assert_eq!(Value::Null, mean.evaluate().unwrap());
// test update one not-null value
let mut mean = Mean::<i32>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Some(42)]))];
assert!(mean.update_batch(&v).is_ok());
mean.update_batch(&v).unwrap();
assert_eq!(Value::from(42.0_f64), mean.evaluate().unwrap());
// test update one null value
let mut mean = Mean::<i32>::default();
let v: Vec<VectorRef> = vec![Arc::new(Int32Vector::from(vec![Option::<i32>::None]))];
assert!(mean.update_batch(&v).is_ok());
mean.update_batch(&v).unwrap();
assert_eq!(Value::Null, mean.evaluate().unwrap());
// test update no null-value batch
@@ -210,7 +210,7 @@ mod test {
Some(1),
Some(2),
]))];
assert!(mean.update_batch(&v).is_ok());
mean.update_batch(&v).unwrap();
assert_eq!(Value::from(0.6666666666666666), mean.evaluate().unwrap());
// test update null-value batch
@@ -221,7 +221,7 @@ mod test {
Some(3),
Some(4),
]))];
assert!(mean.update_batch(&v).is_ok());
mean.update_batch(&v).unwrap();
assert_eq!(Value::from(1.6666666666666667), mean.evaluate().unwrap());
// test update with constant vector
@@ -230,7 +230,7 @@ mod test {
Arc::new(Int32Vector::from_vec(vec![4])),
10,
))];
assert!(mean.update_batch(&v).is_ok());
mean.update_batch(&v).unwrap();
assert_eq!(Value::from(4.0), mean.evaluate().unwrap());
}
}


@@ -299,7 +299,7 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut percentile = Percentile::<i32>::default();
assert!(percentile.update_batch(&[]).is_ok());
percentile.update_batch(&[]).unwrap();
assert!(percentile.not_greater.is_empty());
assert!(percentile.greater.is_empty());
assert_eq!(Value::Null, percentile.evaluate().unwrap());
@@ -310,7 +310,7 @@ mod test {
Arc::new(Int32Vector::from(vec![Some(42)])),
Arc::new(Float64Vector::from(vec![Some(100.0_f64)])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(42.0_f64), percentile.evaluate().unwrap());
// test update one null value
@@ -319,7 +319,7 @@ mod test {
Arc::new(Int32Vector::from(vec![Option::<i32>::None])),
Arc::new(Float64Vector::from(vec![Some(100.0_f64)])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::Null, percentile.evaluate().unwrap());
// test update no null-value batch
@@ -332,7 +332,7 @@ mod test {
Some(100.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(2_f64), percentile.evaluate().unwrap());
// test update null-value batch
@@ -346,7 +346,7 @@ mod test {
Some(100.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(4_f64), percentile.evaluate().unwrap());
// test update with constant vector
@@ -358,7 +358,7 @@ mod test {
)),
Arc::new(Float64Vector::from(vec![Some(100.0_f64), Some(100.0_f64)])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(4_f64), percentile.evaluate().unwrap());
// test left border
@@ -371,7 +371,7 @@ mod test {
Some(0.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(-1.0_f64), percentile.evaluate().unwrap());
// test medium
@@ -384,7 +384,7 @@ mod test {
Some(50.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(1.0_f64), percentile.evaluate().unwrap());
// test right border
@@ -397,7 +397,7 @@ mod test {
Some(100.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(2.0_f64), percentile.evaluate().unwrap());
// the following is the result of numpy.percentile
@@ -414,7 +414,7 @@ mod test {
Some(40.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(Value::from(6.400000000_f64), percentile.evaluate().unwrap());
// the following is the result of numpy.percentile
@@ -430,7 +430,7 @@ mod test {
Some(95.0_f64),
])),
];
assert!(percentile.update_batch(&v).is_ok());
percentile.update_batch(&v).unwrap();
assert_eq!(
Value::from(9.700_000_000_000_001_f64),
percentile.evaluate().unwrap()


@@ -267,7 +267,7 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut polyval = Polyval::<i32, i64>::default();
assert!(polyval.update_batch(&[]).is_ok());
polyval.update_batch(&[]).unwrap();
assert!(polyval.values.is_empty());
assert_eq!(Value::Null, polyval.evaluate().unwrap());
@@ -277,7 +277,7 @@ mod test {
Arc::new(Int32Vector::from(vec![Some(3)])),
Arc::new(Int64Vector::from(vec![Some(2_i64)])),
];
assert!(polyval.update_batch(&v).is_ok());
polyval.update_batch(&v).unwrap();
assert_eq!(Value::Int64(3), polyval.evaluate().unwrap());
// test update one null value
@@ -286,7 +286,7 @@ mod test {
Arc::new(Int32Vector::from(vec![Option::<i32>::None])),
Arc::new(Int64Vector::from(vec![Some(2_i64)])),
];
assert!(polyval.update_batch(&v).is_ok());
polyval.update_batch(&v).unwrap();
assert_eq!(Value::Null, polyval.evaluate().unwrap());
// test update no null-value batch
@@ -299,7 +299,7 @@ mod test {
Some(2_i64),
])),
];
assert!(polyval.update_batch(&v).is_ok());
polyval.update_batch(&v).unwrap();
assert_eq!(Value::Int64(13), polyval.evaluate().unwrap());
// test update null-value batch
@@ -313,7 +313,7 @@ mod test {
Some(2_i64),
])),
];
assert!(polyval.update_batch(&v).is_ok());
polyval.update_batch(&v).unwrap();
assert_eq!(Value::Int64(13), polyval.evaluate().unwrap());
// test update with constant vector
@@ -325,7 +325,7 @@ mod test {
)),
Arc::new(Int64Vector::from(vec![Some(5_i64), Some(5_i64)])),
];
assert!(polyval.update_batch(&v).is_ok());
polyval.update_batch(&v).unwrap();
assert_eq!(Value::Int64(24), polyval.evaluate().unwrap());
}
}


@@ -231,7 +231,7 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut scipy_stats_norm_cdf = ScipyStatsNormCdf::<i32>::default();
assert!(scipy_stats_norm_cdf.update_batch(&[]).is_ok());
scipy_stats_norm_cdf.update_batch(&[]).unwrap();
assert!(scipy_stats_norm_cdf.values.is_empty());
assert_eq!(Value::Null, scipy_stats_norm_cdf.evaluate().unwrap());
@@ -245,7 +245,7 @@ mod test {
Some(2.0_f64),
])),
];
assert!(scipy_stats_norm_cdf.update_batch(&v).is_ok());
scipy_stats_norm_cdf.update_batch(&v).unwrap();
assert_eq!(
Value::from(0.8086334555398362),
scipy_stats_norm_cdf.evaluate().unwrap()
@@ -262,7 +262,7 @@ mod test {
Some(2.0_f64),
])),
];
assert!(scipy_stats_norm_cdf.update_batch(&v).is_ok());
scipy_stats_norm_cdf.update_batch(&v).unwrap();
assert_eq!(
Value::from(0.5412943699039795),
scipy_stats_norm_cdf.evaluate().unwrap()


@@ -232,7 +232,7 @@ mod test {
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut scipy_stats_norm_pdf = ScipyStatsNormPdf::<i32>::default();
assert!(scipy_stats_norm_pdf.update_batch(&[]).is_ok());
scipy_stats_norm_pdf.update_batch(&[]).unwrap();
assert!(scipy_stats_norm_pdf.values.is_empty());
assert_eq!(Value::Null, scipy_stats_norm_pdf.evaluate().unwrap());
@@ -246,7 +246,7 @@ mod test {
Some(2.0_f64),
])),
];
assert!(scipy_stats_norm_pdf.update_batch(&v).is_ok());
scipy_stats_norm_pdf.update_batch(&v).unwrap();
assert_eq!(
Value::from(0.17843340219081558),
scipy_stats_norm_pdf.evaluate().unwrap()
@@ -263,7 +263,7 @@ mod test {
Some(2.0_f64),
])),
];
assert!(scipy_stats_norm_pdf.update_batch(&v).is_ok());
scipy_stats_norm_pdf.update_batch(&v).unwrap();
assert_eq!(
Value::from(0.12343972049858312),
scipy_stats_norm_pdf.evaluate().unwrap()


@@ -94,7 +94,7 @@ mod tests {
assert!(registry.get_function("test_and").is_none());
assert!(registry.functions().is_empty());
registry.register(func);
assert!(registry.get_function("test_and").is_some());
let _ = registry.get_function("test_and").unwrap();
assert_eq!(1, registry.functions().len());
}
}


@@ -27,7 +27,7 @@ async fn do_bench_channel_manager() {
for _ in 0..10000 {
let idx = rand::random::<usize>() % 100;
let ret = m_clone.get(format!("{idx}"));
assert!(ret.is_ok());
let _ = ret.unwrap();
}
});
joins.push(join);


@@ -577,7 +577,7 @@ mod tests {
let res = mgr.build_endpoint("test_addr");
assert!(res.is_ok());
let _ = res.unwrap();
}
#[tokio::test]
@@ -586,7 +586,7 @@ mod tests {
let addr = "test_addr";
let res = mgr.get(addr);
assert!(res.is_ok());
let _ = res.unwrap();
mgr.retain_channel(|addr, channel| {
assert_eq!("test_addr", addr);
@@ -604,7 +604,7 @@ mod tests {
}),
);
assert!(res.is_ok());
let _ = res.unwrap();
mgr.retain_channel(|addr, channel| {
assert_eq!("test_addr", addr);


@@ -265,7 +265,7 @@ mod test {
let FlightMessage::Schema(decoded_schema) = message else { unreachable!() };
assert_eq!(decoded_schema, schema);
assert!(decoder.schema.is_some());
let _ = decoder.schema.as_ref().unwrap();
let message = decoder.try_decode(d2.clone()).unwrap();
assert!(matches!(message, FlightMessage::Recordbatch(_)));


@@ -38,9 +38,8 @@ async fn test_mtls_config() {
client_key_path: "tests/tls/corrupted".to_string(),
});
let re = ChannelManager::with_tls_config(config);
assert!(re.is_ok());
let re = re.unwrap().get("127.0.0.1:0");
let re = ChannelManager::with_tls_config(config).unwrap();
let re = re.get("127.0.0.1:0");
assert!(re.is_err());
// success
@@ -50,8 +49,7 @@ async fn test_mtls_config() {
client_key_path: "tests/tls/client.key.pem".to_string(),
});
let re = ChannelManager::with_tls_config(config);
assert!(re.is_ok());
let re = re.unwrap().get("127.0.0.1:0");
assert!(re.is_ok());
let re = ChannelManager::with_tls_config(config).unwrap();
let re = re.get("127.0.0.1:0");
let _ = re.unwrap();
}


@@ -742,7 +742,7 @@ mod tests {
manager.recover().await.unwrap();
// The manager should submit the root procedure.
assert!(manager.procedure_state(root_id).await.unwrap().is_some());
let _ = manager.procedure_state(root_id).await.unwrap().unwrap();
// Since the mocked root procedure actually doesn't submit subprocedures, so there is no
// related state.
assert!(manager.procedure_state(child_id).await.unwrap().is_none());


@@ -388,6 +388,6 @@ mod tests {
StatusCode::Unexpected,
))));
assert!(state.is_failed());
assert!(state.error().is_some());
let _ = state.error().unwrap();
}
}


@@ -373,7 +373,7 @@ mod test {
Arc::new(Schema::try_from(df_schema.clone()).unwrap()),
Arc::new(EmptyExec::new(true, df_schema.clone())),
);
assert!(plan.df_plan.as_any().downcast_ref::<EmptyExec>().is_some());
let _ = plan.df_plan.as_any().downcast_ref::<EmptyExec>().unwrap();
let df_plan = DfPhysicalPlanAdapter(Arc::new(plan));
assert_eq!(df_schema, df_plan.schema());


@@ -212,7 +212,6 @@ mod tests {
.worker_threads(2)
.thread_name("test_spawn_join")
.build();
assert!(runtime.is_ok());
Arc::new(runtime.unwrap())
}


@@ -136,6 +136,6 @@ pub(crate) async fn create_test_table(
table_id: table.table_info().ident.table_id,
table: table.clone(),
};
assert!(instance.catalog_manager.register_table(req).await.is_ok());
let _ = instance.catalog_manager.register_table(req).await.unwrap();
Ok(table)
}


@@ -280,7 +280,7 @@ mod tests {
let field = Field::try_from(&column_schema).unwrap();
assert_eq!("v1", field.metadata().get("k1").unwrap());
assert!(field.metadata().get(DEFAULT_CONSTRAINT_KEY).is_some());
let _ = field.metadata().get(DEFAULT_CONSTRAINT_KEY).unwrap();
let new_column_schema = ColumnSchema::try_from(&field).unwrap();
assert_eq!(column_schema, new_column_schema);


@@ -318,9 +318,7 @@ mod tests {
Arc::new(Int32Array::from(vec![2])),
Arc::new(Int32Array::from(vec![3])),
];
let vectors = Helper::try_into_vectors(&arrays);
assert!(vectors.is_ok());
let vectors = vectors.unwrap();
let vectors = Helper::try_into_vectors(&arrays).unwrap();
vectors.iter().for_each(|v| assert_eq!(1, v.len()));
assert_eq!(Value::Int32(1), vectors[0].get(0));
assert_eq!(Value::Int32(2), vectors[1].get(0));


@@ -711,7 +711,7 @@ mod tests {
..Default::default()
};
// If nullable is true, it doesn't matter whether the insert request has the column.
assert!(validate_insert_request(&schema, &request).is_ok());
validate_insert_request(&schema, &request).unwrap();
let schema = Schema::new(vec![
ColumnSchema::new("a", ConcreteDataType::int32_datatype(), false)
@@ -735,7 +735,7 @@ mod tests {
};
// If nullable is false, but the column is defined with default value,
// it also doesn't matter whether the insert request has the column.
assert!(validate_insert_request(&schema, &request).is_ok());
validate_insert_request(&schema, &request).unwrap();
let request = InsertRequest {
columns: vec![Column {
@@ -772,7 +772,7 @@ mod tests {
assert_eq!(stmts.len(), 4);
for stmt in stmts {
let re = check_permission(plugins.clone(), &stmt, &query_ctx);
assert!(re.is_ok());
re.unwrap();
}
let sql = r#"
@@ -783,13 +783,13 @@ mod tests {
assert_eq!(stmts.len(), 2);
for stmt in stmts {
let re = check_permission(plugins.clone(), &stmt, &query_ctx);
assert!(re.is_ok());
re.unwrap();
}
let sql = "USE randomschema";
let stmts = parse_stmt(sql, &GreptimeDbDialect {}).unwrap();
let re = check_permission(plugins.clone(), &stmts[0], &query_ctx);
assert!(re.is_ok());
re.unwrap();
fn replace_test(template_sql: &str, plugins: Arc<Plugins>, query_ctx: &QueryContextRef) {
// test right
@@ -823,7 +823,7 @@ mod tests {
let stmt = &parse_stmt(sql, &GreptimeDbDialect {}).unwrap()[0];
let re = check_permission(plugins, stmt, query_ctx);
if is_ok {
assert!(re.is_ok());
re.unwrap();
} else {
assert!(re.is_err());
}
@@ -849,8 +849,7 @@ mod tests {
// test show tables
let sql = "SHOW TABLES FROM public";
let stmt = parse_stmt(sql, &GreptimeDbDialect {}).unwrap();
let re = check_permission(plugins.clone(), &stmt[0], &query_ctx);
assert!(re.is_ok());
check_permission(plugins.clone(), &stmt[0], &query_ctx).unwrap();
let sql = "SHOW TABLES FROM wrongschema";
let stmt = parse_stmt(sql, &GreptimeDbDialect {}).unwrap();


@@ -130,7 +130,7 @@ mod tests {
async fn test_noop_logstore() {
let store = NoopLogStore::default();
let e = store.entry("".as_bytes(), 1, NamespaceImpl::default());
assert!(store.append(e.clone()).await.is_ok());
let _ = store.append(e.clone()).await.unwrap();
assert!(store
.append_batch(&NamespaceImpl::default(), vec![e])
.await

View File

@@ -537,7 +537,7 @@ mod tests {
let namespace = Namespace::with_id(42);
for id in 0..4096 {
let entry = Entry::create(id, namespace.id(), [b'x'; 4096].to_vec());
assert!(logstore.append(entry).await.is_ok());
let _ = logstore.append(entry).await.unwrap();
}
let before_purge = wal_dir_usage(dir.path().to_str().unwrap()).await;
@@ -569,7 +569,7 @@ mod tests {
let namespace = Namespace::with_id(42);
for id in 0..1024 {
let entry = Entry::create(id, namespace.id(), [b'x'; 4096].to_vec());
assert!(logstore.append(entry).await.is_ok());
let _ = logstore.append(entry).await.unwrap();
}
logstore.obsolete(namespace.clone(), 100).await.unwrap();


@@ -421,7 +421,7 @@ mod tests {
.with_value(format!("{}-{}", "value", i).into_bytes())
.with_prev_kv();
let res = self.client.put(req).await;
assert!(res.is_ok());
let _ = res.unwrap();
}
}
@@ -429,7 +429,7 @@ mod tests {
let req =
DeleteRangeRequest::new().with_prefix(format!("{}-{}", TEST_KEY_PREFIX, self.ns));
let res = self.client.delete_range(req).await;
assert!(res.is_ok());
let _ = res.unwrap();
}
}
@@ -446,7 +446,7 @@ mod tests {
let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
.enable_heartbeat()
.build();
assert!(meta_client.heartbeat_client().is_ok());
let _ = meta_client.heartbeat_client().unwrap();
assert!(meta_client.router_client().is_err());
assert!(meta_client.store_client().is_err());
meta_client.start(urls).await.unwrap();
@@ -456,7 +456,7 @@ mod tests {
.enable_router()
.build();
assert!(meta_client.heartbeat_client().is_err());
assert!(meta_client.router_client().is_ok());
let _ = meta_client.router_client().unwrap();
assert!(meta_client.store_client().is_err());
meta_client.start(urls).await.unwrap();
assert!(meta_client.router_client().unwrap().is_started().await);
@@ -466,7 +466,7 @@ mod tests {
.build();
assert!(meta_client.heartbeat_client().is_err());
assert!(meta_client.router_client().is_err());
assert!(meta_client.store_client().is_ok());
let _ = meta_client.store_client().unwrap();
meta_client.start(urls).await.unwrap();
assert!(meta_client.store_client().unwrap().is_started().await);
@@ -477,9 +477,9 @@ mod tests {
.build();
assert_eq!(1, meta_client.id().0);
assert_eq!(2, meta_client.id().1);
assert!(meta_client.heartbeat_client().is_ok());
assert!(meta_client.router_client().is_ok());
assert!(meta_client.store_client().is_ok());
let _ = meta_client.heartbeat_client().unwrap();
let _ = meta_client.router_client().unwrap();
let _ = meta_client.store_client().unwrap();
meta_client.start(urls).await.unwrap();
assert!(meta_client.heartbeat_client().unwrap().is_started().await);
assert!(meta_client.router_client().unwrap().is_started().await);
@@ -568,8 +568,7 @@ mod tests {
#[tokio::test]
async fn test_ask_leader() {
let tc = new_client("test_ask_leader").await;
let res = tc.client.ask_leader().await;
assert!(res.is_ok());
tc.client.ask_leader().await.unwrap();
}
#[tokio::test]
@@ -650,7 +649,7 @@ mod tests {
let req = DeleteRequest::new(table_name.clone());
let res = client.delete_route(req).await;
assert!(res.is_ok());
let _ = res.unwrap();
}
#[tokio::test]


@@ -267,7 +267,7 @@ mod tests {
let kv_map = to_stat_kv_map(vec![kv]).unwrap();
assert_eq!(1, kv_map.len());
assert!(kv_map.get(&stat_key).is_some());
let _ = kv_map.get(&stat_key).unwrap();
let stat_val = kv_map.get(&stat_key).unwrap();
let stat = stat_val.stats.get(0).unwrap();
@@ -284,8 +284,7 @@ mod tests {
error: None,
..Default::default()
});
let result = check_resp_header(&header, mock_ctx());
assert!(result.is_ok());
check_resp_header(&header, mock_ctx()).unwrap();
let result = check_resp_header(&None, mock_ctx());
assert!(result.is_err());


@@ -385,7 +385,7 @@ mod tests {
fd.heartbeat(0);
fd.heartbeat(1000);
fd.heartbeat(1100);
assert!(fd.last_heartbeat_millis.is_some());
let _ = fd.last_heartbeat_millis.unwrap();
assert!(fd.is_available(1200));
}


@@ -272,7 +272,7 @@ mod tests {
{
let mut iter = container.iter();
assert!(iter.next().is_some());
let _ = iter.next().unwrap();
assert!(iter.next().is_none());
}


@@ -164,7 +164,6 @@ mod tests {
node_id: 101,
};
let res = ctx.in_memory.get(key.try_into().unwrap()).await.unwrap();
assert!(res.is_some());
let kv = res.unwrap();
let key: StatKey = kv.key.clone().try_into().unwrap();
assert_eq!(3, key.cluster_id);
@@ -176,7 +175,6 @@ mod tests {
handle_request_many_times(ctx.clone(), &handler, 10).await;
let res = ctx.in_memory.get(key.try_into().unwrap()).await.unwrap();
assert!(res.is_some());
let kv = res.unwrap();
let val: StatValue = kv.value.try_into().unwrap();
// refresh every 10 stats

View File

@@ -133,17 +133,19 @@ mod tests {
let kv_store = Arc::new(MemStore::default());
let service = DefaultMetadataService::new(kv_store.clone());
let result = service.create_schema("catalog", "public", false).await;
assert!(result.is_ok());
service
.create_schema("catalog", "public", false)
.await
.unwrap();
verify_result(kv_store.clone()).await;
let result = service.create_schema("catalog", "public", false).await;
assert!(result.is_err());
let result = service.create_schema("catalog", "public", true).await;
assert!(result.is_ok());
service
.create_schema("catalog", "public", true)
.await
.unwrap();
verify_result(kv_store.clone()).await;
}
@@ -156,7 +158,6 @@ mod tests {
let result = kv_store.get(key.clone()).await.unwrap();
assert!(result.is_some());
let kv = result.unwrap();
assert_eq!(key, kv.key);
@@ -170,7 +171,6 @@ mod tests {
let result = kv_store.get(key.clone()).await.unwrap();
assert!(result.is_some());
let kv = result.unwrap();
assert_eq!(key, kv.key);


@@ -112,7 +112,7 @@ pub async fn mock(
}
}),
);
assert!(res.is_ok());
let _ = res.unwrap();
MockInfo {
server_addr,


@@ -124,7 +124,7 @@ mod tests {
let req = RangeRequest::default();
let res = meta_srv.range(req.into_request()).await;
assert!(res.is_ok());
let _ = res.unwrap();
}
#[tokio::test]
@@ -134,7 +134,7 @@ mod tests {
let req = PutRequest::default();
let res = meta_srv.put(req.into_request()).await;
assert!(res.is_ok());
let _ = res.unwrap();
}
#[tokio::test]
@@ -144,7 +144,7 @@ mod tests {
let req = BatchGetRequest::default();
let res = meta_srv.batch_get(req.into_request()).await;
assert!(res.is_ok());
let _ = res.unwrap();
}
#[tokio::test]
@@ -154,7 +154,7 @@ mod tests {
let req = BatchPutRequest::default();
let res = meta_srv.batch_put(req.into_request()).await;
assert!(res.is_ok());
let _ = res.unwrap();
}
#[tokio::test]
@@ -164,7 +164,7 @@ mod tests {
let req = BatchDeleteRequest::default();
let res = meta_srv.batch_delete(req.into_request()).await;
assert!(res.is_ok());
let _ = res.unwrap();
}
#[tokio::test]
@@ -174,7 +174,7 @@ mod tests {
let req = CompareAndPutRequest::default();
let res = meta_srv.compare_and_put(req.into_request()).await;
assert!(res.is_ok());
let _ = res.unwrap();
}
#[tokio::test]
@@ -184,7 +184,7 @@ mod tests {
let req = DeleteRangeRequest::default();
let res = meta_srv.delete_range(req.into_request()).await;
assert!(res.is_ok());
let _ = res.unwrap();
}
#[tokio::test]
@@ -194,6 +194,6 @@ mod tests {
let req = MoveValueRequest::default();
let res = meta_srv.move_value(req.into_request()).await;
assert!(res.is_ok());
let _ = res.unwrap();
}
}


@@ -745,7 +745,7 @@ mod tests {
let get: Get = req.try_into().unwrap();
assert_eq!(b"test_key".to_vec(), get.key);
assert!(get.options.is_some());
let _ = get.options.unwrap();
}
#[test]
@@ -761,7 +761,7 @@ mod tests {
assert_eq!(b"test_key".to_vec(), put.key);
assert_eq!(b"test_value".to_vec(), put.value);
assert!(put.options.is_some());
let _ = put.options.unwrap();
}
#[test]
@@ -794,7 +794,7 @@ mod tests {
assert_eq!(b"test_key".to_vec(), batch_put.kvs.get(0).unwrap().key);
assert_eq!(b"test_value".to_vec(), batch_put.kvs.get(0).unwrap().value);
assert!(batch_put.options.is_some());
let _ = batch_put.options.unwrap();
}
#[test]
@@ -811,7 +811,7 @@ mod tests {
assert_eq!(b"k1".to_vec(), batch_delete.keys.get(0).unwrap().clone());
assert_eq!(b"k2".to_vec(), batch_delete.keys.get(1).unwrap().clone());
assert_eq!(b"k3".to_vec(), batch_delete.keys.get(2).unwrap().clone());
assert!(batch_delete.options.is_some());
let _ = batch_delete.options.unwrap();
}
#[test]
@@ -828,7 +828,7 @@ mod tests {
assert_eq!(b"test_key".to_vec(), compare_and_put.key);
assert_eq!(b"test_expect".to_vec(), compare_and_put.expect);
assert_eq!(b"test_value".to_vec(), compare_and_put.value);
assert!(compare_and_put.put_options.is_some());
let _ = compare_and_put.put_options.unwrap();
}
#[test]
@@ -843,7 +843,7 @@ mod tests {
let delete: Delete = req.try_into().unwrap();
assert_eq!(b"test_key".to_vec(), delete.key);
assert!(delete.options.is_some());
let _ = delete.options.unwrap();
}
#[test]
@@ -858,6 +858,6 @@ mod tests {
assert_eq!(b"test_from_key".to_vec(), move_value.from_key);
assert_eq!(b"test_to_key".to_vec(), move_value.to_key);
assert!(move_value.delete_options.is_some());
let _ = move_value.delete_options.unwrap();
}
}

View File

@@ -152,7 +152,6 @@ mod tests {
.delete("test_key1".as_bytes().to_vec(), true)
.await
.unwrap();
assert!(prev_kv.is_some());
assert_eq!("test_key1".as_bytes(), prev_kv.unwrap().key);
}


@@ -301,7 +301,7 @@ mod tests {
async fn test_txn_compare_equal() {
let kv_store = create_kv_store().await;
let key = vec![101u8];
assert!(kv_store.delete(key.clone(), false).await.is_ok());
let _ = kv_store.delete(key.clone(), false).await.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_not_exist_value(
@@ -332,7 +332,7 @@ mod tests {
async fn test_txn_compare_greater() {
let kv_store = create_kv_store().await;
let key = vec![102u8];
assert!(kv_store.delete(key.clone(), false).await.is_ok());
let _ = kv_store.delete(key.clone(), false).await.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_not_exist_value(
@@ -375,7 +375,7 @@ mod tests {
async fn test_txn_compare_less() {
let kv_store = create_kv_store().await;
let key = vec![103u8];
assert!(kv_store.delete(vec![3], false).await.is_ok());
let _ = kv_store.delete(vec![3], false).await.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_not_exist_value(
@@ -418,7 +418,7 @@ mod tests {
async fn test_txn_compare_not_equal() {
let kv_store = create_kv_store().await;
let key = vec![104u8];
assert!(kv_store.delete(key.clone(), false).await.is_ok());
let _ = kv_store.delete(key.clone(), false).await.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_not_exist_value(


@@ -358,8 +358,8 @@ mod tests {
assert_eq!(&[0, 4], &new_meta.primary_key_indices[..]);
assert_eq!(&[1, 2, 3, 5], &new_meta.value_indices[..]);
assert!(new_schema.column_schema_by_name("my_tag").is_some());
assert!(new_schema.column_schema_by_name("my_field").is_some());
let _ = new_schema.column_schema_by_name("my_tag").unwrap();
let _ = new_schema.column_schema_by_name("my_field").unwrap();
assert_eq!(new_schema.version(), schema.version() + 1);
assert_eq!(new_meta.next_column_id, old_meta.next_column_id + 2);
@@ -387,7 +387,7 @@ mod tests {
assert_eq!(&[0, 1, 6], &new_meta.primary_key_indices[..]);
assert_eq!(&[2, 3, 4, 5, 7], &new_meta.value_indices[..]);
assert!(new_schema.column_schema_by_name("my_tag_first").is_some());
let _ = new_schema.column_schema_by_name("my_tag_first").unwrap();
assert!(new_schema
.column_schema_by_name("my_field_after_ts")
.is_some());


@@ -226,7 +226,7 @@ fn test_validate_create_table_request() {
.contains("Invalid primary key: time index column can't be included in primary key"));
request.primary_key_indices = vec![0];
assert!(validate_create_table_request(&request).is_ok());
validate_create_table_request(&request).unwrap();
}
#[tokio::test]
@@ -823,7 +823,7 @@ async fn test_drop_table() {
region_numbers: vec![0],
engine: MITO_ENGINE.to_string(),
};
assert!(table_engine.create_table(&ctx, request).await.is_ok());
let _ = table_engine.create_table(&ctx, request).await.unwrap();
assert!(table_engine.table_exists(&engine_ctx, table_id));
}
@@ -854,7 +854,7 @@ async fn test_table_delete_rows() {
let key_column_values =
HashMap::from([("host".to_string(), del_hosts), ("ts".to_string(), del_tss)]);
let del_req = DeleteRequest { key_column_values };
assert!(table.delete(del_req).await.is_ok());
let _ = table.delete(del_req).await.unwrap();
let session_ctx = SessionContext::new();
let stream = table.scan(None, &[], None).await.unwrap();


@@ -183,13 +183,13 @@ mod tests {
#[test]
fn test_table_manifest_compatibility() {
let table_change = r#"{"table_info":{"ident":{"table_id":0,"version":0},"name":"demo","desc":null,"catalog_name":"greptime","schema_name":"public","meta":{"schema":{"column_schemas":[{"name":"host","data_type":{"String":null},"is_nullable":false,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"cpu","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"memory","data_type":{"Float64":{}},"is_nullable":false,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":true,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}}],"timestamp_index":3,"version":0},"primary_key_indices":[0],"value_indices":[1,2,3],"engine":"mito","next_column_id":1,"region_numbers":[],"engine_options":{},"options":{"write_buffer_size":null,"ttl":null,"extra_options":{}},"created_on":"2023-03-06T08:50:34.662020Z"},"table_type":"Base"}}"#;
assert!(serde_json::from_str::<TableChange>(table_change).is_ok());
let _ = serde_json::from_str::<TableChange>(table_change).unwrap();
let table_remove =
r#"{"table_ident":{"table_id":42,"version":0},"table_name":"test_table"}"#;
assert!(serde_json::from_str::<TableRemove>(table_remove).is_ok());
let _ = serde_json::from_str::<TableRemove>(table_remove).unwrap();
let protocol_action = r#"{"min_reader_version":0,"min_writer_version":1}"#;
assert!(serde_json::from_str::<ProtocolAction>(protocol_action).is_ok());
let _ = serde_json::from_str::<ProtocolAction>(protocol_action).unwrap();
}
}


@@ -300,8 +300,8 @@ async fn test_object_store_cache_policy() -> Result<()> {
let p3 = "test_file3";
store.write(p3, "Hello, object3!").await.unwrap();
assert!(store.read(p3).await.is_ok());
assert!(store.range_read(p3, 0..5).await.is_ok());
let _ = store.read(p3).await.unwrap();
let _ = store.range_read(p3, 0..5).await.unwrap();
assert_cache_files(
&cache_store,


@@ -295,7 +295,6 @@ mod tests {
fn test_insert_req_check() {
let right = mock_insert_request();
let ret = check_req(&right);
assert!(ret.is_ok());
assert_eq!(ret.unwrap(), 3);
let wrong = mock_wrong_insert_request();


@@ -415,7 +415,7 @@ mod tests {
table_id: NUMBERS_TABLE_ID,
table: Arc::new(NumbersTable::default()),
};
assert!(catalog_manager.register_table(req).await.is_ok());
let _ = catalog_manager.register_table(req).await.unwrap();
QueryEngineFactory::new(catalog_manager, false).query_engine()
}


@@ -149,7 +149,7 @@ mod test {
.unwrap();
let context = OptimizerContext::default();
assert!(OrderHintRule.try_optimize(&plan, &context).is_ok());
let _ = OrderHintRule.try_optimize(&plan, &context).unwrap();
// should read the first (with `.sort(true, false)`) sort option
let scan_req = adapter.get_scan_req();


@@ -57,8 +57,7 @@ mod tests {
fn test_validate_catalog_and_schema() {
let context = Arc::new(QueryContext::with("greptime", "public"));
let re = validate_catalog_and_schema("greptime", "public", &context);
assert!(re.is_ok());
validate_catalog_and_schema("greptime", "public", &context).unwrap();
let re = validate_catalog_and_schema("greptime", "wrong_schema", &context);
assert!(re.is_err());
let re = validate_catalog_and_schema("wrong_catalog", "public", &context);
@@ -66,6 +65,6 @@ mod tests {
let re = validate_catalog_and_schema("wrong_catalog", "wrong_schema", &context);
assert!(re.is_err());
assert!(validate_catalog_and_schema("greptime", "information_schema", &context).is_ok());
validate_catalog_and_schema("greptime", "information_schema", &context).unwrap();
}
}


@@ -112,7 +112,7 @@ fn catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
table_id: NUMBERS_TABLE_ID,
table: Arc::new(NumbersTable::default()),
};
assert!(catalog_manager.register_table_sync(req).is_ok());
let _ = catalog_manager.register_table_sync(req).unwrap();
Ok(catalog_manager)
}


@@ -201,11 +201,11 @@ def test(n):
// try to find and compile
let script = mgr.try_find_script_and_compile(schema, name).await.unwrap();
assert!(script.is_some());
let _ = script.unwrap();
{
let cached = mgr.compiled.read().unwrap();
assert!(cached.get(name).is_some());
let _ = cached.get(name).unwrap();
}
}
}


@@ -512,6 +512,6 @@ def test(a, b, c, **params):
assert_eq!(copr.return_types, vec![None]);
assert_eq!(copr.kwarg, Some("params".to_string()));
assert_eq!(copr.script, script);
assert!(copr.code_obj.is_some());
let _ = copr.code_obj.unwrap();
}
}


@@ -345,6 +345,6 @@ def a(cpu, mem, **kwargs):
&HashMap::from([("a".to_string(), "1".to_string())]),
);
dbg!(&ret);
assert!(ret.is_ok());
let _ = ret.unwrap();
}
}


@@ -311,7 +311,7 @@ fn run_builtin_fn_testcases() {
let loc = loc.to_str().expect("Fail to parse path");
let mut file = File::open(loc).expect("Fail to open file");
let mut buf = String::new();
assert!(file.read_to_string(&mut buf).is_ok());
let _ = file.read_to_string(&mut buf).unwrap();
let testcases: Vec<TestCase> = from_ron_string(&buf).expect("Fail to convert to testcases");
let cached_vm = rustpython_vm::Interpreter::with_init(Default::default(), |vm| {
vm.add_native_module("greptime", Box::new(greptime_builtin::make_module));


@@ -94,7 +94,7 @@ fn run_ron_testcases() {
let loc = loc.to_str().expect("Fail to parse path");
let mut file = File::open(loc).expect("Fail to open file");
let mut buf = String::new();
assert!(file.read_to_string(&mut buf).is_ok());
let _ = file.read_to_string(&mut buf).unwrap();
let testcases: Vec<TestCase> = from_ron_string(&buf).expect("Fail to convert to testcases");
info!("Read {} testcases from {}", testcases.len(), loc);
for testcase in testcases {


@@ -244,16 +244,16 @@ pub mod test {
Password::PlainText(password.to_string().into()),
)
.await;
assert!(re.is_ok());
let _ = re.unwrap();
}
#[tokio::test]
async fn test_authorize() {
let provider = StaticUserProvider::try_from("cmd:root=123456,admin=654321").unwrap();
let re = provider
provider
.authorize("catalog", "schema", &UserInfo::new("root"))
.await;
assert!(re.is_ok());
.await
.unwrap();
}
#[tokio::test]
@@ -270,7 +270,6 @@ pub mod test {
{
// write a tmp file
let file = File::create(&file_path);
assert!(file.is_ok());
let file = file.unwrap();
let mut lw = LineWriter::new(file);
assert!(lw
@@ -279,7 +278,7 @@ pub mod test {
admin=654321",
)
.is_ok());
assert!(lw.flush().is_ok());
lw.flush().unwrap();
}
let param = format!("file:{file_path}");


@@ -203,11 +203,10 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
async fn on_prepare<'a>(
&'a mut self,
query: &'a str,
raw_query: &'a str,
w: StatementMetaWriter<'a, W>,
) -> Result<()> {
let raw_query = query.clone();
let (query, param_num) = replace_placeholders(query);
let (query, param_num) = replace_placeholders(raw_query);
let statement = validate_query(raw_query).await?;


@@ -199,7 +199,8 @@ mod tests {
.write(b"\r\n")
.build();
let mut conn = Connection::new(mock);
let result = conn.write_line("An OpenTSDB error.".to_string()).await;
assert!(result.is_ok());
conn.write_line("An OpenTSDB error.".to_string())
.await
.unwrap();
}
}


@@ -626,7 +626,7 @@ mod test {
];
let mut builder = DataRowEncoder::new(Arc::new(schema));
for i in values.iter() {
assert!(encode_value(i, &mut builder).is_ok());
encode_value(i, &mut builder).unwrap();
}
let err = encode_value(


@@ -157,7 +157,6 @@ mod tests {
assert!(t.cert_path.is_empty());
let setup = t.setup();
assert!(setup.is_ok());
let setup = setup.unwrap();
assert!(setup.is_none());
}


@@ -123,9 +123,9 @@ async fn test_auth_by_plain_text() {
Identity::UserId("greptime", None),
Password::PlainText("greptime".to_string().into()),
)
.await;
assert!(auth_result.is_ok());
assert_eq!("greptime", auth_result.unwrap().username());
.await
.unwrap();
assert_eq!("greptime", auth_result.username());
// auth failed, unsupported password type
let auth_result = user_provider
@@ -193,6 +193,8 @@ async fn test_schema_validate() {
let re = validator.authorize("greptime", "public", &wrong_user).await;
assert!(re.is_err());
// check ok
let re = validator.authorize("greptime", "public", &right_user).await;
assert!(re.is_ok());
validator
.authorize("greptime", "public", &right_user)
.await
.unwrap();
}


@@ -121,16 +121,17 @@ fn create_grpc_server(table: MemTable) -> Result<Arc<dyn Server>> {
async fn test_grpc_server_startup() {
let server = create_grpc_server(MemTable::default_numbers_table()).unwrap();
let re = server.start(LOCALHOST_WITH_0.parse().unwrap()).await;
assert!(re.is_ok());
let _ = re.unwrap();
}
#[tokio::test]
async fn test_grpc_query() {
let server = create_grpc_server(MemTable::default_numbers_table()).unwrap();
let re = server.start(LOCALHOST_WITH_0.parse().unwrap()).await;
assert!(re.is_ok());
let grpc_client = Client::with_urls(vec![re.unwrap().to_string()]);
let re = server
.start(LOCALHOST_WITH_0.parse().unwrap())
.await
.unwrap();
let grpc_client = Client::with_urls(vec![re.to_string()]);
let mut db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, grpc_client);
let re = db.sql("select * from numbers").await;
@@ -142,5 +143,5 @@ async fn test_grpc_query() {
password: greptime.clone(),
}));
let re = db.sql("select * from numbers").await;
assert!(re.is_ok());
let _ = re.unwrap();
}


@@ -101,7 +101,7 @@ async fn test_whitelist_no_auth() {
// try whitelist path
let req = mock_http_request(None, Some("http://localhost/health")).unwrap();
let req = http_auth.authorize(req).await;
assert!(req.is_ok());
let _ = req.unwrap();
}
// copy from http::authorize


@@ -83,6 +83,5 @@ fn test_grpc_interceptor() {
assert!(fail.is_err());
let req = Request::Inserts(InsertRequests::default());
let success = GrpcQueryInterceptor::pre_execute(&di, &req, ctx);
assert!(success.is_ok());
GrpcQueryInterceptor::pre_execute(&di, &req, ctx).unwrap();
}


@@ -76,7 +76,7 @@ async fn test_start_mysql_server() -> Result<()> {
let mysql_server = create_mysql_server(table, Default::default())?;
let listening = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
let result = mysql_server.start(listening).await;
assert!(result.is_ok());
let _ = result.unwrap();
let result = mysql_server.start(listening).await;
assert!(result
@@ -103,10 +103,10 @@ async fn test_reject_no_database() -> Result<()> {
let fail = create_connection(server_port, None, false).await;
assert!(fail.is_err());
let pass = create_connection(server_port, Some("public"), false).await;
assert!(pass.is_ok());
let result = mysql_server.shutdown().await;
assert!(result.is_ok());
let _ = create_connection(server_port, Some("public"), false)
.await
.unwrap();
mysql_server.shutdown().await.unwrap();
Ok(())
}
@@ -135,10 +135,10 @@ async fn test_schema_validation() -> Result<()> {
})
.await?;
let pass = create_connection_default_db_name(server_port, false).await;
assert!(pass.is_ok());
let result = mysql_server.shutdown().await;
assert!(result.is_ok());
let _ = create_connection_default_db_name(server_port, false)
.await
.unwrap();
mysql_server.shutdown().await.unwrap();
// change to another username
let (mysql_server, server_port) = generate_server(DatabaseAuthInfo {
@@ -150,8 +150,7 @@ async fn test_schema_validation() -> Result<()> {
let fail = create_connection_default_db_name(server_port, false).await;
assert!(fail.is_err());
let result = mysql_server.shutdown().await;
assert!(result.is_ok());
mysql_server.shutdown().await.unwrap();
Ok(())
}
@@ -195,8 +194,7 @@ async fn test_shutdown_mysql_server() -> Result<()> {
}
tokio::time::sleep(Duration::from_millis(100)).await;
let result = mysql_server.shutdown().await;
assert!(result.is_ok());
mysql_server.shutdown().await.unwrap();
for handle in join_handles.iter_mut() {
let result = handle.await.unwrap();
@@ -349,7 +347,7 @@ async fn test_db_name() -> Result<()> {
// None actually uses default database name
let r = create_connection_default_db_name(server_addr.port(), client_tls).await;
assert!(r.is_ok());
let _ = r.unwrap();
let r = create_connection(server_addr.port(), Some("tomcat"), client_tls).await;
assert!(r.is_err());
@@ -515,8 +513,6 @@ async fn test_prepare_all_type(
let output: std::result::Result<Vec<Row>, mysql_async::Error> =
connection.exec(statement.clone(), vec![v]).await;
assert!(output.is_ok());
let rows = output.unwrap();
assert!(!rows.is_empty());
}


@@ -69,7 +69,7 @@ async fn test_start_opentsdb_server() -> Result<()> {
let server = create_opentsdb_server(tx)?;
let listening = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
let result = server.start(listening).await;
assert!(result.is_ok());
let _ = result.unwrap();
let result = server.start(listening).await;
assert!(result


@@ -73,7 +73,7 @@ pub async fn test_start_postgres_server() -> Result<()> {
let pg_server = create_postgres_server(table, false, Default::default(), None)?;
let listening = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
let result = pg_server.start(listening).await;
assert!(result.is_ok());
let _ = result.unwrap();
let result = pg_server.start(listening).await;
assert!(result
@@ -85,8 +85,8 @@ pub async fn test_start_postgres_server() -> Result<()> {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_shutdown_pg_server_range() -> Result<()> {
assert!(test_shutdown_pg_server(false).await.is_ok());
assert!(test_shutdown_pg_server(true).await.is_ok());
test_shutdown_pg_server(false).await.unwrap();
test_shutdown_pg_server(true).await.unwrap();
Ok(())
}
@@ -110,10 +110,8 @@ async fn test_schema_validating() -> Result<()> {
})
.await?;
let pass = create_plain_connection(server_port, true).await;
assert!(pass.is_ok());
let result = pg_server.shutdown().await;
assert!(result.is_ok());
let _ = create_plain_connection(server_port, true).await.unwrap();
pg_server.shutdown().await.unwrap();
let (pg_server, server_port) = generate_server(DatabaseAuthInfo {
catalog: DEFAULT_CATALOG_NAME,
@@ -124,8 +122,7 @@ async fn test_schema_validating() -> Result<()> {
let fail = create_plain_connection(server_port, true).await;
assert!(fail.is_err());
let result = pg_server.shutdown().await;
assert!(result.is_ok());
pg_server.shutdown().await.unwrap();
Ok(())
}
@@ -177,8 +174,7 @@ async fn test_shutdown_pg_server(with_pwd: bool) -> Result<()> {
}
tokio::time::sleep(Duration::from_millis(100)).await;
let result = postgres_server.shutdown().await;
assert!(result.is_ok());
postgres_server.shutdown().await.unwrap();
for handle in join_handles.iter_mut() {
let result = handle.await.unwrap();
@@ -305,7 +301,7 @@ async fn test_using_db() -> Result<()> {
.await
.unwrap();
let result = client.simple_query("SELECT uint32s FROM numbers").await;
assert!(result.is_ok());
let _ = result.unwrap();
let client = create_connection_with_given_catalog_schema(
server_port,
@@ -313,7 +309,7 @@ async fn test_using_db() -> Result<()> {
DEFAULT_SCHEMA_NAME,
)
.await;
assert!(client.is_ok());
let _ = client.unwrap();
let client =
create_connection_with_given_catalog_schema(server_port, "notfound", DEFAULT_SCHEMA_NAME)
@@ -366,11 +362,11 @@ async fn do_simple_query(server_tls: TlsOption, client_tls: bool) -> Result<()>
if !client_tls {
let client = create_plain_connection(server_port, false).await.unwrap();
let result = client.simple_query("SELECT uint32s FROM numbers").await;
assert!(result.is_ok());
let _ = result.unwrap();
} else {
let client = create_secure_connection(server_port, false).await.unwrap();
let result = client.simple_query("SELECT uint32s FROM numbers").await;
assert!(result.is_ok());
let _ = result.unwrap();
}
Ok(())


@@ -931,7 +931,7 @@ PARTITION BY RANGE COLUMNS(b, a) (
)
ENGINE=mito";
let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {});
assert!(result.is_ok());
let _ = result.unwrap();
let sql = r"
CREATE TABLE rcx ( a INT, b STRING, c INT )
@@ -1489,6 +1489,6 @@ ENGINE=mito";
create table foo("user" string, i bigint time index)
"#;
let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {});
assert!(result.is_ok());
let _ = result.unwrap();
}
}


@@ -34,14 +34,12 @@ rows | protobuf | arrow |
fn encode_arrow(batch: &WriteBatch, dst: &mut Vec<u8>) {
let encoder = codec::PayloadEncoder::new();
let result = encoder.encode(batch.payload(), dst);
assert!(result.is_ok());
encoder.encode(batch.payload(), dst).unwrap();
}
fn decode_arrow(dst: &[u8], mutation_types: &[i32]) {
let decoder = codec::PayloadDecoder::new(mutation_types);
let result = decoder.decode(dst);
assert!(result.is_ok());
let _ = decoder.decode(dst).unwrap();
}
fn bench_wal_decode(c: &mut Criterion) {

View File

@@ -35,8 +35,7 @@ rows | protobuf | arrow |
fn encode_arrow(batch: &WriteBatch) {
let encoder = codec::PayloadEncoder::new();
let mut dst = vec![];
let result = encoder.encode(batch.payload(), &mut dst);
assert!(result.is_ok());
encoder.encode(batch.payload(), &mut dst).unwrap();
}
fn bench_wal_encode(c: &mut Criterion) {

View File

@@ -35,12 +35,10 @@ rows | protobuf | arrow |
fn codec_arrow(batch: &WriteBatch, mutation_types: &[i32]) {
let encoder = codec::PayloadEncoder::new();
let mut dst = vec![];
let result = encoder.encode(batch.payload(), &mut dst);
assert!(result.is_ok());
encoder.encode(batch.payload(), &mut dst).unwrap();
let decoder = codec::PayloadDecoder::new(mutation_types);
let result = decoder.decode(&dst);
assert!(result.is_ok());
let _ = decoder.decode(&dst).unwrap();
}
fn bench_wal_encode_decode(c: &mut Criterion) {

View File

@@ -658,7 +658,7 @@ mod tests {
("ts".to_string(), tsv),
]);
wb.put(put_data).unwrap();
assert!(region.write(&WriteContext::default(), wb).await.is_ok());
let _ = region.write(&WriteContext::default(), wb).await.unwrap();
// Flush memtable to sst.
region.flush(&FlushContext::default()).await.unwrap();

View File

@@ -338,16 +338,16 @@ mod tests {
#[test]
fn test_region_manifest_compatibility() {
let region_edit = r#"{"region_version":0,"flushed_sequence":null,"files_to_add":[{"region_id":4402341478400,"file_name":"4b220a70-2b03-4641-9687-b65d94641208.parquet","time_range":[{"value":1451609210000,"unit":"Millisecond"},{"value":1451609520000,"unit":"Millisecond"}],"level":1}],"files_to_remove":[{"region_id":4402341478400,"file_name":"34b6ebb9-b8a5-4a4b-b744-56f67defad02.parquet","time_range":[{"value":1451609210000,"unit":"Millisecond"},{"value":1451609520000,"unit":"Millisecond"}],"level":0}]}"#;
assert!(serde_json::from_str::<RegionEdit>(region_edit).is_ok());
let _ = serde_json::from_str::<RegionEdit>(region_edit).unwrap();
let region_change = r#" {"committed_sequence":42,"metadata":{"id":0,"name":"region-0","columns":{"columns":[{"cf_id":0,"desc":{"id":2,"name":"k1","data_type":{"Int32":{}},"is_nullable":false,"is_time_index":false,"default_constraint":null,"comment":""}},{"cf_id":0,"desc":{"id":1,"name":"timestamp","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"comment":""}},{"cf_id":1,"desc":{"id":3,"name":"v1","data_type":{"Float32":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"comment":""}},{"cf_id":1,"desc":{"id":2147483649,"name":"__sequence","data_type":{"UInt64":{}},"is_nullable":false,"is_time_index":false,"default_constraint":null,"comment":""}},{"cf_id":1,"desc":{"id":2147483650,"name":"__op_type","data_type":{"UInt8":{}},"is_nullable":false,"is_time_index":false,"default_constraint":null,"comment":""}}],"row_key_end":2,"timestamp_key_index":1,"enable_version_column":false,"user_column_end":3},"column_families":{"column_families":[{"name":"default","cf_id":1,"column_index_start":2,"column_index_end":3}]},"version":0}}"#;
assert!(serde_json::from_str::<RegionChange>(region_change).is_ok());
let _ = serde_json::from_str::<RegionChange>(region_change).unwrap();
let region_remove = r#"{"region_id":42}"#;
assert!(serde_json::from_str::<RegionRemove>(region_remove).is_ok());
let _ = serde_json::from_str::<RegionRemove>(region_remove).unwrap();
let protocol_action = r#"{"min_reader_version":1,"min_writer_version":2}"#;
assert!(serde_json::from_str::<ProtocolAction>(protocol_action).is_ok());
let _ = serde_json::from_str::<ProtocolAction>(protocol_action).unwrap();
}
fn mock_file_meta() -> FileMeta {

View File

@@ -444,7 +444,7 @@ mod tests {
];
for action in actions {
assert!(manifest.update(action).await.is_ok());
let _ = manifest.update(action).await.unwrap();
}
assert!(manifest.last_checkpoint().await.unwrap().is_none());
assert_scan(manifest, 0, 3).await;
@@ -503,7 +503,7 @@ mod tests {
))]),
];
for action in actions {
assert!(manifest.update(action).await.is_ok());
let _ = manifest.update(action).await.unwrap();
}
assert_scan(manifest, 3, 2).await;

View File

@@ -610,9 +610,9 @@ mod tests {
assert_eq!(3, v);
// delete logs whose version is less than 4 and keep checkpoint 3.
assert!(log_store.delete_until(4, true).await.is_ok());
assert!(log_store.load_checkpoint(3).await.unwrap().is_some());
assert!(log_store.load_last_checkpoint().await.unwrap().is_some());
let _ = log_store.delete_until(4, true).await.unwrap();
let _ = log_store.load_checkpoint(3).await.unwrap().unwrap();
let _ = log_store.load_last_checkpoint().await.unwrap().unwrap();
let mut it = log_store.scan(0, 11).await.unwrap();
let (version, bytes) = it.next_log().await.unwrap().unwrap();
assert_eq!(4, version);
@@ -620,7 +620,7 @@ mod tests {
assert!(it.next_log().await.unwrap().is_none());
// delete all logs and checkpoints
assert!(log_store.delete_until(11, false).await.is_ok());
let _ = log_store.delete_until(11, false).await.unwrap();
assert!(log_store.load_checkpoint(3).await.unwrap().is_none());
assert!(log_store.load_last_checkpoint().await.unwrap().is_none());
let mut it = log_store.scan(0, 11).await.unwrap();

View File

@@ -508,7 +508,7 @@ fn test_iter_after_none() {
};
let mut iter = ctx.memtable.iter(iter_ctx).unwrap();
assert!(iter.next().is_some());
let _ = iter.next().unwrap();
assert!(iter.next().is_none());
assert!(iter.next().is_none());
});

View File

@@ -90,7 +90,7 @@ impl<S: LogStore> TesterBase<S> {
pub async fn checkpoint_manifest(&self) {
let manifest = &self.region.inner.manifest;
manifest.set_flushed_manifest_version(manifest.last_version() - 1);
assert!(manifest.do_checkpoint().await.unwrap().is_some());
let _ = manifest.do_checkpoint().await.unwrap().unwrap();
}
pub async fn close(&self) {

View File

@@ -288,7 +288,7 @@ fn check_schema_names(schema: &SchemaRef, names: &[&str]) {
for (idx, name) in names.iter().enumerate() {
assert_eq!(*name, schema.column_name_by_index(idx));
assert!(schema.column_schema_by_name(name).is_some());
let _ = schema.column_schema_by_name(name).unwrap();
}
}

View File

@@ -142,7 +142,7 @@ impl<S: LogStore> ProjectionTester<S> {
let put_data = new_put_data(len, key_start, ts_start, initial_value);
batch.put(put_data).unwrap();
assert!(self.region.write(&self.write_ctx, batch).await.is_ok());
let _ = self.region.write(&self.write_ctx, batch).await.unwrap();
}
async fn scan(&self, projection: Option<Vec<usize>>) -> Vec<Vec<i64>> {

View File

@@ -443,8 +443,8 @@ mod tests {
handler,
);
assert!(scheduler.schedule(MockRequest { region_id: 1 }).is_ok());
assert!(scheduler.schedule(MockRequest { region_id: 2 }).is_ok());
let _ = scheduler.schedule(MockRequest { region_id: 1 }).unwrap();
let _ = scheduler.schedule(MockRequest { region_id: 2 }).unwrap();
tokio::time::timeout(Duration::from_secs(1), latch.wait())
.await

View File

@@ -167,8 +167,7 @@ mod tests {
let encoder = PayloadEncoder::new();
let mut dst = vec![];
let result = encoder.encode(batch.payload(), &mut dst);
assert!(result.is_ok());
encoder.encode(batch.payload(), &mut dst).unwrap();
let decoder = PayloadDecoder::new(&mutation_types);
let result = decoder.decode(&dst);
@@ -201,8 +200,7 @@ mod tests {
let encoder = PayloadEncoder::new();
let mut dst = vec![];
let result = encoder.encode(batch.payload(), &mut dst);
assert!(result.is_ok());
encoder.encode(batch.payload(), &mut dst).unwrap();
let decoder = PayloadDecoder::new(&mutation_types);
let result = decoder.decode(&dst);

View File

@@ -183,7 +183,7 @@ mod tests {
assert_eq!(schema_new, *batch.schema());
let mutation = &batch.payload().mutations[0];
assert!(mutation.record_batch.column_by_name("v0").is_some());
let _ = mutation.record_batch.column_by_name("v0").unwrap();
}
#[test]

View File

@@ -325,7 +325,7 @@ mod tests {
let rb = RecordBatch::try_new(schema.clone(), vec![name_array, count_array]).unwrap();
writer.write(&rb).unwrap();
}
assert!(writer.close().is_ok());
let _ = writer.close().unwrap();
(path, schema)
}

View File

@@ -41,15 +41,15 @@ mod tests {
engine: MITO_ENGINE.to_string(),
};
let result = instance
instance
.frontend()
.catalog_manager()
.register_system_table(RegisterSystemTableRequest {
create_table_request: request,
open_hook: None,
})
.await;
assert!(result.is_ok());
.await
.unwrap();
assert!(
instance

View File

@@ -54,8 +54,7 @@ mod tests {
],
);
// should create new table "my_metric_1" directly
let result = instance.exec(&data_point1, ctx.clone()).await;
assert!(result.is_ok());
instance.exec(&data_point1, ctx.clone()).await.unwrap();
let data_point2 = DataPoint::new(
"my_metric_1".to_string(),
@@ -67,13 +66,11 @@ mod tests {
],
);
// should create new column "tagk3" directly
let result = instance.exec(&data_point2, ctx.clone()).await;
assert!(result.is_ok());
instance.exec(&data_point2, ctx.clone()).await.unwrap();
let data_point3 = DataPoint::new("my_metric_1".to_string(), 3000, 3.0, vec![]);
// should handle null tags properly
let result = instance.exec(&data_point3, ctx.clone()).await;
assert!(result.is_ok());
instance.exec(&data_point3, ctx.clone()).await.unwrap();
let output = instance
.do_query(

View File

@@ -315,7 +315,7 @@ pub async fn create_test_table(
table_id: table.table_info().ident.table_id,
table,
};
assert!(catalog_manager.register_table(req).await.is_ok());
let _ = catalog_manager.register_table(req).await.unwrap();
Ok(())
}

View File

@@ -328,8 +328,7 @@ pub async fn test_health_check(store_type: StorageType) {
setup_grpc_server(store_type, "auto_create_table").await;
let grpc_client = Client::with_urls(vec![addr]);
let r = grpc_client.health_check().await;
assert!(r.is_ok());
grpc_client.health_check().await.unwrap();
let _ = fe_grpc_server.shutdown().await;
guard.remove_all().await;

View File

@@ -75,7 +75,7 @@ pub async fn test_sql_api(store_type: StorageType) {
let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
assert_eq!(body.code(), 1004);
assert_eq!(body.error().unwrap(), "sql parameter is required.");
assert!(body.execution_time_ms().is_some());
let _ = body.execution_time_ms().unwrap();
let res = client
.get("/v1/sql?sql=select * from numbers limit 10")
@@ -85,7 +85,7 @@ pub async fn test_sql_api(store_type: StorageType) {
let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
assert!(body.success());
assert!(body.execution_time_ms().is_some());
let _ = body.execution_time_ms().unwrap();
let output = body.output().unwrap();
assert_eq!(output.len(), 1);
@@ -112,7 +112,7 @@ pub async fn test_sql_api(store_type: StorageType) {
let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
assert!(body.success());
assert!(body.execution_time_ms().is_some());
let _ = body.execution_time_ms().unwrap();
let output = body.output().unwrap();
assert_eq!(output.len(), 1);
@@ -132,7 +132,7 @@ pub async fn test_sql_api(store_type: StorageType) {
let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
assert!(body.success());
assert!(body.execution_time_ms().is_some());
let _ = body.execution_time_ms().unwrap();
let output = body.output().unwrap();
assert_eq!(output.len(), 1);
@@ -152,7 +152,7 @@ pub async fn test_sql_api(store_type: StorageType) {
let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
assert!(body.success());
assert!(body.execution_time_ms().is_some());
let _ = body.execution_time_ms().unwrap();
let output = body.output().unwrap();
assert_eq!(output.len(), 1);
assert_eq!(
@@ -171,7 +171,7 @@ pub async fn test_sql_api(store_type: StorageType) {
let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
assert!(body.success());
assert!(body.execution_time_ms().is_some());
let _ = body.execution_time_ms().unwrap();
let outputs = body.output().unwrap();
assert_eq!(outputs.len(), 2);
assert_eq!(
@@ -197,7 +197,7 @@ pub async fn test_sql_api(store_type: StorageType) {
let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
assert!(!body.success());
assert!(body.execution_time_ms().is_some());
let _ = body.execution_time_ms().unwrap();
assert!(body.error().unwrap().contains("Table not found"));
// test with the database parameter given
@@ -209,7 +209,7 @@ pub async fn test_sql_api(store_type: StorageType) {
let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
assert!(body.success());
assert!(body.execution_time_ms().is_some());
let _ = body.execution_time_ms().unwrap();
let outputs = body.output().unwrap();
assert_eq!(outputs.len(), 1);
assert_eq!(
@@ -237,7 +237,7 @@ pub async fn test_sql_api(store_type: StorageType) {
let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
assert!(body.success());
assert!(body.execution_time_ms().is_some());
let _ = body.execution_time_ms().unwrap();
let outputs = body.output().unwrap();
assert_eq!(outputs.len(), 1);
assert_eq!(
@@ -281,7 +281,7 @@ pub async fn test_prometheus_promql_api(store_type: StorageType) {
let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
assert!(body.success());
assert!(body.execution_time_ms().is_some());
let _ = body.execution_time_ms().unwrap();
guard.remove_all().await;
}
@@ -460,7 +460,7 @@ def test(n) -> vector[f64]:
let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
assert_eq!(body.code(), 0);
assert!(body.execution_time_ms().is_some());
let _ = body.execution_time_ms().unwrap();
let output = body.output().unwrap();
assert_eq!(output.len(), 1);
assert_eq!(

View File

@@ -117,9 +117,9 @@ pub async fn test_region_failover(store_type: StorageType) {
};
let cache = get_table_cache(&frontend, &cache_key).unwrap();
assert!(cache.is_some());
let _ = cache.unwrap();
let route_cache = get_route_cache(&frontend, &table_name);
assert!(route_cache.is_some());
let _ = route_cache.unwrap();
let distribution = find_region_distribution(&cluster).await;
info!("Find region distribution: {distribution:?}");
@@ -264,7 +264,7 @@ CREATE TABLE my_table (
PARTITION r3 VALUES LESS THAN (MAXVALUE),
)";
let result = cluster.frontend.do_query(sql, QueryContext::arc()).await;
assert!(result[0].is_ok());
let _ = result.get(0).unwrap();
}
async fn find_region_distribution(cluster: &GreptimeDbCluster) -> HashMap<u64, Vec<u32>> {

View File

@@ -122,7 +122,10 @@ pub async fn test_mysql_crud(store_type: StorageType) {
assert_eq!(ret, 6);
}
assert!(sqlx::query("delete from demo").execute(&pool).await.is_ok());
let _ = sqlx::query("delete from demo")
.execute(&pool)
.await
.unwrap();
let rows = sqlx::query("select i from demo")
.fetch_all(&pool)
.await
@@ -180,7 +183,10 @@ pub async fn test_postgres_crud(store_type: StorageType) {
assert_eq!(ret, 6);
}
assert!(sqlx::query("delete from demo").execute(&pool).await.is_ok());
let _ = sqlx::query("delete from demo")
.execute(&pool)
.await
.unwrap();
let rows = sqlx::query("select i from demo")
.fetch_all(&pool)
.await