fix: Fix All The Tests! (#752)

* fix: Fix several test compile errors

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: some compile errors in tests

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* fix: compile errors in frontend tests

* fix: compile errors in frontend tests

* test: Fix tests in api and common-query

* test: Fix test in sql crate

* fix: resolve substrait error

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* chore: add more tests

* test: Fix tests in servers

* fix instance_test

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>

* test: Fix tests in tests-integration

Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: Lei, HUANG <mrsatangel@gmail.com>
Co-authored-by: evenyag <realevenyag@gmail.com>
Ruihang Xia authored on 2022-12-15 17:47:14 +08:00, committed by GitHub
parent 7c696dae08
commit 0f3dcc1b38
25 changed files with 378 additions and 348 deletions

View File

@@ -445,7 +445,7 @@ mod tests {
assert!(result.is_err());
assert_eq!(
result.unwrap_err().to_string(),
"Failed to create column datatype from List(ListType { inner: Boolean(BooleanType) })"
"Failed to create column datatype from List(ListType { item_type: Boolean(BooleanType) })"
);
}

View File

@@ -41,7 +41,7 @@ async fn run() {
column_defs: vec![
ColumnDef {
name: "timestamp".to_string(),
- datatype: ColumnDataType::Timestamp as i32,
+ datatype: ColumnDataType::TimestampMillisecond as i32,
is_nullable: false,
default_constraint: None,
},
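This Timestamp → TimestampMillisecond rename recurs throughout the diff. A minimal sketch of a column definition under the unit-specific enum, reusing only the fields shown above:

// Sketch: gRPC column definitions now name the timestamp unit explicitly;
// the bare ColumnDataType::Timestamp variant no longer exists.
let ts_col = ColumnDef {
    name: "ts".to_string(),
    datatype: ColumnDataType::TimestampMillisecond as i32,
    is_nullable: false,
    default_constraint: None,
};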

View File

@@ -318,12 +318,11 @@ mod tests {
fn create_test_column(vector: VectorRef) -> Column {
let wrapper: ColumnDataTypeWrapper = vector.data_type().try_into().unwrap();
- let array = vector.to_arrow_array();
Column {
column_name: "test".to_string(),
semantic_type: 1,
- values: Some(values(&[array.clone()]).unwrap()),
- null_mask: null_mask(&vec![array], vector.len()),
+ values: Some(values(&[vector.clone()]).unwrap()),
+ null_mask: null_mask(&vec![vector.clone()], vector.len()),
datatype: wrapper.datatype() as i32,
}
}

View File

@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
- use std::sync::Arc;
- use anymap::AnyMap;
use clap::Parser;
use frontend::frontend::{Frontend, FrontendOptions};
@@ -138,14 +136,14 @@ impl TryFrom<StartCommand> for FrontendOptions {
if let Some(addr) = cmd.mysql_addr {
opts.mysql_options = Some(MysqlOptions {
addr,
- tls: Arc::new(tls_option.clone()),
+ tls: tls_option.clone(),
..Default::default()
});
}
if let Some(addr) = cmd.postgres_addr {
opts.postgres_options = Some(PostgresOptions {
addr,
- tls: Arc::new(tls_option),
+ tls: tls_option,
..Default::default()
});
}
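Both branches above reflect the same API change: MysqlOptions.tls and PostgresOptions.tls now hold a TlsOption by value instead of an Arc<TlsOption>. A minimal sketch of the new wiring, assuming only the fields shown in this hunk (the addresses are placeholders):

// Sketch: one TlsOption cloned into each protocol's options; no Arc needed.
let tls_option = TlsOption::default();
let mysql_options = MysqlOptions {
    addr: "127.0.0.1:4002".to_string(), // placeholder address
    tls: tls_option.clone(),
    ..Default::default()
};
let postgres_options = PostgresOptions {
    addr: "127.0.0.1:4003".to_string(), // placeholder address
    tls: tls_option,
    ..Default::default()
};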

View File

@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
- use std::sync::Arc;
- use anymap::AnyMap;
use clap::Parser;
use common_telemetry::info;
@@ -262,12 +260,12 @@ impl TryFrom<StartCommand> for FrontendOptions {
let tls_option = TlsOption::new(cmd.tls_mode, cmd.tls_cert_path, cmd.tls_key_path);
if let Some(mut mysql_options) = opts.mysql_options {
- mysql_options.tls = Arc::new(tls_option.clone());
+ mysql_options.tls = tls_option.clone();
opts.mysql_options = Some(mysql_options);
}
if let Some(mut postgres_options) = opts.postgres_options {
- postgres_options.tls = Arc::new(tls_option);
+ postgres_options.tls = tls_option;
opts.postgres_options = Some(postgres_options);
}

View File

@@ -320,8 +320,16 @@ mod test {
.await
.unwrap();
let pretty_print = pretty::pretty_format_batches(&df_recordbatches).unwrap();
- // TODO(ruihang): fill this assertion
- assert_eq!(pretty_print.to_string().as_str(), "");
+ assert_eq!(
+ pretty_print.to_string(),
+ r#"+---+
+ | a |
+ +---+
+ | 1 |
+ | 2 |
+ | 3 |
+ +---+"#
+ );
}
#[test]

View File

@@ -425,6 +425,10 @@ impl DFLogicalSubstraitConvertor {
let provider = table_scan
.source
.as_any()
+ .downcast_ref::<DefaultTableSource>()
+ .context(UnknownPlanSnafu)?
+ .table_provider
+ .as_any()
.downcast_ref::<DfTableProviderAdapter>()
.context(UnknownPlanSnafu)?;
let table_info = provider.table().table_info();
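DataFusion now wraps the scanned provider in a DefaultTableSource, so reaching GreptimeDB's DfTableProviderAdapter takes two downcasts. The same chain as above, split into two bindings as a readability sketch (snafu contexts as shown in the hunk):

// Sketch: first unwrap DataFusion's DefaultTableSource wrapper...
let source = table_scan
    .source
    .as_any()
    .downcast_ref::<DefaultTableSource>()
    .context(UnknownPlanSnafu)?;
// ...then downcast its inner provider to the GreptimeDB adapter.
let provider = source
    .table_provider
    .as_any()
    .downcast_ref::<DfTableProviderAdapter>()
    .context(UnknownPlanSnafu)?;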

View File

@@ -260,7 +260,7 @@ mod tests {
},
ColumnDef {
name: "ts".to_string(),
- datatype: ColumnDataType::Timestamp as i32,
+ datatype: ColumnDataType::TimestampMillisecond as i32,
is_nullable: false,
default_constraint: None,
},
@@ -300,6 +300,7 @@ mod tests {
ConcreteDataType::timestamp_millisecond_datatype(),
false,
)
+ .with_time_index(true),
ColumnSchema::new("cpu", ConcreteDataType::float32_datatype(), true),
ColumnSchema::new("memory", ConcreteDataType::float64_datatype(), true),
];

View File

@@ -14,7 +14,9 @@
use catalog::CatalogManagerRef;
use common_query::Output;
- use datatypes::prelude::{ConcreteDataType, VectorBuilder};
+ use datatypes::data_type::DataType;
+ use datatypes::prelude::ConcreteDataType;
+ use datatypes::vectors::MutableVector;
use snafu::{ensure, OptionExt, ResultExt};
use sql::ast::Value as SqlValue;
use sql::statements::insert::Insert;
@@ -70,7 +72,7 @@ impl SqlHandler {
};
let rows_num = values.len();
- let mut columns_builders: Vec<(&String, &ConcreteDataType, VectorBuilder)> =
+ let mut columns_builders: Vec<(&String, &ConcreteDataType, Box<dyn MutableVector>)> =
Vec::with_capacity(columns_num);
if columns.is_empty() {
@@ -79,7 +81,7 @@ impl SqlHandler {
columns_builders.push((
&column_schema.name,
data_type,
- VectorBuilder::with_capacity(data_type.clone(), rows_num),
+ data_type.create_mutable_vector(rows_num),
));
}
} else {
@@ -95,7 +97,7 @@ impl SqlHandler {
columns_builders.push((
column_name,
data_type,
- VectorBuilder::with_capacity(data_type.clone(), rows_num),
+ data_type.create_mutable_vector(rows_num),
));
}
}
@@ -123,7 +125,7 @@ impl SqlHandler {
table_name: table_ref.table.to_string(),
columns_values: columns_builders
.into_iter()
- .map(|(c, _, mut b)| (c.to_owned(), b.finish()))
+ .map(|(c, _, mut b)| (c.to_owned(), b.to_vector()))
.collect(),
}))
}
@@ -133,11 +135,11 @@ fn add_row_to_vector(
column_name: &str,
data_type: &ConcreteDataType,
sql_val: &SqlValue,
- builder: &mut VectorBuilder,
+ builder: &mut Box<dyn MutableVector>,
) -> Result<()> {
let value = statements::sql_value_to_value(column_name, data_type, sql_val)
.context(ParseSqlValueSnafu)?;
- builder.push(&value);
+ builder.push_value_ref(value.as_value_ref()).unwrap();
Ok(())
}
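This hunk captures the builder migration in one place: the removed VectorBuilder gives way to a type-erased Box<dyn MutableVector> created from the column's ConcreteDataType. A minimal sketch of the new flow, using only calls that appear in the diff (the Int16 type and sample value are illustrative):

// Sketch: create_mutable_vector replaces VectorBuilder::with_capacity,
// push_value_ref replaces push, and to_vector replaces finish.
let data_type = ConcreteDataType::int16_datatype();
let mut builder = data_type.create_mutable_vector(1);
let value = Value::from(1_i16);
builder.push_value_ref(value.as_value_ref()).unwrap();
let vector = builder.to_vector(); // yields a VectorRef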

View File

@@ -17,11 +17,8 @@ use std::sync::Arc;
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_query::Output;
use common_recordbatch::util;
- use datafusion::arrow_print;
- use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
- use datatypes::arrow::array::{Int64Array, UInt64Array, Utf8Array};
- use datatypes::arrow_array::StringArray;
- use datatypes::prelude::ConcreteDataType;
+ use datatypes::data_type::ConcreteDataType;
+ use datatypes::vectors::{Int64Vector, StringVector, UInt64Vector, VectorRef};
use session::context::QueryContext;
use crate::instance::Instance;
@@ -66,11 +63,13 @@ async fn test_create_database_and_insert_query() {
match query_output {
Output::Stream(s) => {
let batches = util::collect(s).await.unwrap();
- let columns = batches[0].df_recordbatch.columns();
- assert_eq!(1, columns.len());
+ assert_eq!(1, batches[0].num_columns());
assert_eq!(
- &Int64Array::from_slice(&[1655276557000, 1655276558000]),
- columns[0].as_any().downcast_ref::<Int64Array>().unwrap()
+ Arc::new(Int64Vector::from_vec(vec![
+ 1655276557000_i64,
+ 1655276558000_i64
+ ])) as VectorRef,
+ *batches[0].column(0)
);
}
_ => unreachable!(),
@@ -155,18 +154,15 @@ async fn assert_query_result(instance: &Instance, sql: &str, ts: i64, host: &str
match query_output {
Output::Stream(s) => {
let batches = util::collect(s).await.unwrap();
- let columns = batches[0].df_recordbatch.columns();
- assert_eq!(2, columns.len());
+ // let columns = batches[0].df_recordbatch.columns();
+ assert_eq!(2, batches[0].num_columns());
assert_eq!(
- &Utf8Array::<i32>::from_slice(&[host]),
- columns[0]
- .as_any()
- .downcast_ref::<Utf8Array<i32>>()
- .unwrap()
+ Arc::new(StringVector::from(vec![host])) as VectorRef,
+ *batches[0].column(0)
);
assert_eq!(
- &Int64Array::from_slice(&[ts]),
- columns[1].as_any().downcast_ref::<Int64Array>().unwrap()
+ Arc::new(Int64Vector::from_vec(vec![ts])) as VectorRef,
+ *batches[0].column(1)
);
}
_ => unreachable!(),
@@ -235,11 +231,13 @@ async fn test_execute_insert_query_with_i64_timestamp() {
match query_output {
Output::Stream(s) => {
let batches = util::collect(s).await.unwrap();
- let columns = batches[0].df_recordbatch.columns();
- assert_eq!(1, columns.len());
+ assert_eq!(1, batches[0].num_columns());
assert_eq!(
- &Int64Array::from_slice(&[1655276557000, 1655276558000]),
- columns[0].as_any().downcast_ref::<Int64Array>().unwrap()
+ Arc::new(Int64Vector::from_vec(vec![
+ 1655276557000_i64,
+ 1655276558000_i64
+ ])) as VectorRef,
+ *batches[0].column(0)
);
}
_ => unreachable!(),
@@ -249,11 +247,13 @@ async fn test_execute_insert_query_with_i64_timestamp() {
match query_output {
Output::Stream(s) => {
let batches = util::collect(s).await.unwrap();
- let columns = batches[0].df_recordbatch.columns();
- assert_eq!(1, columns.len());
+ assert_eq!(1, batches[0].num_columns());
assert_eq!(
- &Int64Array::from_slice(&[1655276557000, 1655276558000]),
- columns[0].as_any().downcast_ref::<Int64Array>().unwrap()
+ Arc::new(Int64Vector::from_vec(vec![
+ 1655276557000_i64,
+ 1655276558000_i64
+ ])) as VectorRef,
+ *batches[0].column(0)
);
}
_ => unreachable!(),
@@ -270,13 +270,12 @@ async fn test_execute_query() {
match output {
Output::Stream(recordbatch) => {
let numbers = util::collect(recordbatch).await.unwrap();
- let columns = numbers[0].df_recordbatch.columns();
- assert_eq!(1, columns.len());
- assert_eq!(columns[0].len(), 1);
+ assert_eq!(1, numbers[0].num_columns());
+ assert_eq!(numbers[0].column(0).len(), 1);
assert_eq!(
- *columns[0].as_any().downcast_ref::<UInt64Array>().unwrap(),
- UInt64Array::from_slice(&[4950])
+ Arc::new(UInt64Vector::from_vec(vec![4950_u64])) as VectorRef,
+ *numbers[0].column(0),
);
}
_ => unreachable!(),
@@ -294,13 +293,12 @@ async fn test_execute_show_databases_tables() {
match output {
Output::RecordBatches(databases) => {
let databases = databases.take();
- let columns = databases[0].df_recordbatch.columns();
- assert_eq!(1, columns.len());
- assert_eq!(columns[0].len(), 1);
+ assert_eq!(1, databases[0].num_columns());
+ assert_eq!(databases[0].column(0).len(), 1);
assert_eq!(
- *columns[0].as_any().downcast_ref::<StringArray>().unwrap(),
- StringArray::from(vec![Some("public")])
+ *databases[0].column(0),
+ Arc::new(StringVector::from(vec![Some("public")])) as VectorRef
);
}
_ => unreachable!(),
@@ -310,13 +308,12 @@ async fn test_execute_show_databases_tables() {
match output {
Output::RecordBatches(databases) => {
let databases = databases.take();
- let columns = databases[0].df_recordbatch.columns();
- assert_eq!(1, columns.len());
- assert_eq!(columns[0].len(), 1);
+ assert_eq!(1, databases[0].num_columns());
+ assert_eq!(databases[0].column(0).len(), 1);
assert_eq!(
- *columns[0].as_any().downcast_ref::<StringArray>().unwrap(),
- StringArray::from(vec![Some("public")])
+ *databases[0].column(0),
+ Arc::new(StringVector::from(vec![Some("public")])) as VectorRef
);
}
_ => unreachable!(),
@@ -326,9 +323,8 @@ async fn test_execute_show_databases_tables() {
match output {
Output::RecordBatches(databases) => {
let databases = databases.take();
- let columns = databases[0].df_recordbatch.columns();
- assert_eq!(1, columns.len());
- assert_eq!(columns[0].len(), 2);
+ assert_eq!(1, databases[0].num_columns());
+ assert_eq!(databases[0].column(0).len(), 2);
}
_ => unreachable!(),
}
@@ -346,9 +342,8 @@ async fn test_execute_show_databases_tables() {
match output {
Output::RecordBatches(databases) => {
let databases = databases.take();
- let columns = databases[0].df_recordbatch.columns();
- assert_eq!(1, columns.len());
- assert_eq!(columns[0].len(), 3);
+ assert_eq!(1, databases[0].num_columns());
+ assert_eq!(databases[0].column(0).len(), 3);
}
_ => unreachable!(),
}
@@ -358,13 +353,12 @@ async fn test_execute_show_databases_tables() {
match output {
Output::RecordBatches(databases) => {
let databases = databases.take();
- let columns = databases[0].df_recordbatch.columns();
- assert_eq!(1, columns.len());
- assert_eq!(columns[0].len(), 1);
+ assert_eq!(1, databases[0].num_columns());
+ assert_eq!(databases[0].column(0).len(), 1);
assert_eq!(
- *columns[0].as_any().downcast_ref::<StringArray>().unwrap(),
- StringArray::from(vec![Some("demo")])
+ *databases[0].column(0),
+ Arc::new(StringVector::from(vec![Some("demo")])) as VectorRef
);
}
_ => unreachable!(),
@@ -394,18 +388,13 @@ pub async fn test_execute_create() {
assert!(matches!(output, Output::AffectedRows(1)));
}
- async fn check_output_stream(output: Output, expected: Vec<&str>) {
+ async fn check_output_stream(output: Output, expected: String) {
let recordbatches = match output {
- Output::Stream(stream) => util::collect(stream).await.unwrap(),
- Output::RecordBatches(recordbatches) => recordbatches.take(),
+ Output::Stream(stream) => util::collect_batches(stream).await.unwrap(),
+ Output::RecordBatches(recordbatches) => recordbatches,
_ => unreachable!(),
};
- let recordbatches = recordbatches
- .into_iter()
- .map(|r| r.df_recordbatch)
- .collect::<Vec<DfRecordBatch>>();
- let pretty_print = arrow_print::write(&recordbatches);
- let pretty_print = pretty_print.lines().collect::<Vec<&str>>();
+ let pretty_print = recordbatches.pretty_print().unwrap();
assert_eq!(pretty_print, expected);
}
@@ -438,15 +427,16 @@ async fn test_alter_table() {
assert!(matches!(output, Output::AffectedRows(1)));
let output = execute_sql(&instance, "select * from demo order by ts").await;
- let expected = vec![
- "+-------+-----+--------+---------------------+--------+",
- "| host | cpu | memory | ts | my_tag |",
- "+-------+-----+--------+---------------------+--------+",
- "| host1 | 1.1 | 100 | 1970-01-01 00:00:01 | |",
- "| host2 | 2.2 | 200 | 1970-01-01 00:00:02 | hello |",
- "| host3 | 3.3 | 300 | 1970-01-01 00:00:03 | |",
- "+-------+-----+--------+---------------------+--------+",
- ];
+ let expected = "\
+ +-------+-----+--------+---------------------+--------+
+ | host | cpu | memory | ts | my_tag |
+ +-------+-----+--------+---------------------+--------+
+ | host1 | 1.1 | 100 | 1970-01-01T00:00:01 | |
+ | host2 | 2.2 | 200 | 1970-01-01T00:00:02 | hello |
+ | host3 | 3.3 | 300 | 1970-01-01T00:00:03 | |
+ +-------+-----+--------+---------------------+--------+\
+ "
+ .to_string();
check_output_stream(output, expected).await;
// Drop a column
@@ -454,15 +444,16 @@ async fn test_alter_table() {
assert!(matches!(output, Output::AffectedRows(0)));
let output = execute_sql(&instance, "select * from demo order by ts").await;
- let expected = vec![
- "+-------+-----+---------------------+--------+",
- "| host | cpu | ts | my_tag |",
- "+-------+-----+---------------------+--------+",
- "| host1 | 1.1 | 1970-01-01 00:00:01 | |",
- "| host2 | 2.2 | 1970-01-01 00:00:02 | hello |",
- "| host3 | 3.3 | 1970-01-01 00:00:03 | |",
- "+-------+-----+---------------------+--------+",
- ];
+ let expected = "\
+ +-------+-----+---------------------+--------+
+ | host | cpu | ts | my_tag |
+ +-------+-----+---------------------+--------+
+ | host1 | 1.1 | 1970-01-01T00:00:01 | |
+ | host2 | 2.2 | 1970-01-01T00:00:02 | hello |
+ | host3 | 3.3 | 1970-01-01T00:00:03 | |
+ +-------+-----+---------------------+--------+\
+ "
+ .to_string();
check_output_stream(output, expected).await;
// insert a new row
@@ -474,16 +465,17 @@ async fn test_alter_table() {
assert!(matches!(output, Output::AffectedRows(1)));
let output = execute_sql(&instance, "select * from demo order by ts").await;
- let expected = vec![
- "+-------+-----+---------------------+--------+",
- "| host | cpu | ts | my_tag |",
- "+-------+-----+---------------------+--------+",
- "| host1 | 1.1 | 1970-01-01 00:00:01 | |",
- "| host2 | 2.2 | 1970-01-01 00:00:02 | hello |",
- "| host3 | 3.3 | 1970-01-01 00:00:03 | |",
- "| host4 | 400 | 1970-01-01 00:00:04 | world |",
- "+-------+-----+---------------------+--------+",
- ];
+ let expected = "\
+ +-------+-----+---------------------+--------+
+ | host | cpu | ts | my_tag |
+ +-------+-----+---------------------+--------+
+ | host1 | 1.1 | 1970-01-01T00:00:01 | |
+ | host2 | 2.2 | 1970-01-01T00:00:02 | hello |
+ | host3 | 3.3 | 1970-01-01T00:00:03 | |
+ | host4 | 400 | 1970-01-01T00:00:04 | world |
+ +-------+-----+---------------------+--------+\
+ "
+ .to_string();
check_output_stream(output, expected).await;
}
@@ -522,14 +514,15 @@ async fn test_insert_with_default_value_for_type(type_name: &str) {
assert!(matches!(output, Output::AffectedRows(1)));
let output = execute_sql(&instance, "select host, cpu from test_table").await;
- let expected = vec![
- "+-------+-----+",
- "| host | cpu |",
- "+-------+-----+",
- "| host1 | 1.1 |",
- "| host2 | 2.2 |",
- "+-------+-----+",
- ];
+ let expected = "\
+ +-------+-----+
+ | host | cpu |
+ +-------+-----+
+ | host1 | 1.1 |
+ | host2 | 2.2 |
+ +-------+-----+\
+ "
+ .to_string();
check_output_stream(output, expected).await;
}
@@ -559,13 +552,14 @@ async fn test_use_database() {
assert!(matches!(output, Output::AffectedRows(1)));
let output = execute_sql_in_db(&instance, "show tables", "db1").await;
- let expected = vec![
- "+--------+",
- "| Tables |",
- "+--------+",
- "| tb1 |",
- "+--------+",
- ];
+ let expected = "\
+ +--------+
+ | Tables |
+ +--------+
+ | tb1 |
+ +--------+\
+ "
+ .to_string();
check_output_stream(output, expected).await;
let output = execute_sql_in_db(
@@ -577,25 +571,27 @@ async fn test_use_database() {
assert!(matches!(output, Output::AffectedRows(1)));
let output = execute_sql_in_db(&instance, "select col_i32 from tb1", "db1").await;
- let expected = vec![
- "+---------+",
- "| col_i32 |",
- "+---------+",
- "| 1 |",
- "+---------+",
- ];
+ let expected = "\
+ +---------+
+ | col_i32 |
+ +---------+
+ | 1 |
+ +---------+\
+ "
+ .to_string();
check_output_stream(output, expected).await;
// Making a particular database the default by means of the USE statement does not preclude
// accessing tables in other databases.
let output = execute_sql(&instance, "select number from public.numbers limit 1").await;
- let expected = vec![
- "+--------+",
- "| number |",
- "+--------+",
- "| 0 |",
- "+--------+",
- ];
+ let expected = "\
+ +--------+
+ | number |
+ +--------+
+ | 0 |
+ +--------+\
+ "
+ .to_string();
check_output_stream(output, expected).await;
}
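Two replacement patterns run through this whole file: column checks go through the RecordBatch accessors (num_columns()/column(i), which return vectors) instead of downcasting raw arrow arrays, and expected output becomes a single pretty-printed String. Distilled into a sketch (values illustrative, calls as in the hunks above):

// Sketch: compare a column against an expected vector...
assert_eq!(1, batches[0].num_columns());
assert_eq!(
    Arc::new(Int64Vector::from_vec(vec![1655276557000_i64])) as VectorRef,
    *batches[0].column(0)
);
// ...and compare whole outputs via their pretty-printed form.
assert_eq!(recordbatches.pretty_print().unwrap(), expected);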

View File

@@ -123,6 +123,18 @@ define_timestamp_with_unit!(Nanosecond);
mod tests {
use super::*;
+ #[test]
+ fn test_to_serde_json_value() {
+ let ts = TimestampSecond::new(123);
+ let val = serde_json::Value::from(ts);
+ match val {
+ serde_json::Value::String(s) => {
+ assert_eq!("1970-01-01 00:02:03+0000", s);
+ }
+ _ => unreachable!(),
+ }
+ }
#[test]
fn test_timestamp_scalar() {
let ts = TimestampSecond::new(123);

View File

@@ -125,6 +125,9 @@ macro_rules! impl_data_type_for_timestamp {
fn cast_value_ref(value: ValueRef) -> crate::Result<Option<Self::Wrapper>> {
match value {
ValueRef::Null => Ok(None),
+ ValueRef::Int64(v) => {
+ Ok(Some([<Timestamp $unit>]::from(v)))
+ }
ValueRef::Timestamp(t) => match t.unit() {
TimeUnit::$unit => Ok(Some([<Timestamp $unit>](t))),
other => error::CastTypeSnafu {

View File

@@ -695,22 +695,26 @@ mod tests {
.await
.unwrap();
match output {
- Output::Stream(stream) => {
- let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- let pretty_print = recordbatches.pretty_print();
- let pretty_print = pretty_print.lines().collect::<Vec<&str>>();
- let expected = vec![
- "+----------------+---------------------+-----+--------+-----------+",
- "| host | ts | cpu | memory | disk_util |",
- "+----------------+---------------------+-----+--------+-----------+",
- "| frontend.host1 | 1970-01-01 00:00:01 | 1.1 | 100 | 9.9 |",
- "| frontend.host2 | 1970-01-01 00:00:02 | | | 9.9 |",
- "| frontend.host3 | 1970-01-01 00:00:03 | 3.3 | 300 | 9.9 |",
- "+----------------+---------------------+-----+--------+-----------+",
- ];
+ Output::RecordBatches(_) => {
+ unreachable!("Output::RecordBatches");
+ }
+ Output::AffectedRows(_) => {
+ unreachable!("Output::AffectedRows");
+ }
+ Output::Stream(s) => {
+ let batches = common_recordbatch::util::collect_batches(s).await.unwrap();
+ let pretty_print = batches.pretty_print().unwrap();
+ let expected = "\
+ +----------------+---------------------+-----+--------+-----------+
+ | host | ts | cpu | memory | disk_util |
+ +----------------+---------------------+-----+--------+-----------+
+ | frontend.host1 | 1970-01-01T00:00:01 | 1.1 | 100 | 9.9 |
+ | frontend.host2 | 1970-01-01T00:00:02 | | | 9.9 |
+ | frontend.host3 | 1970-01-01T00:00:03 | 3.3 | 300 | 9.9 |
+ +----------------+---------------------+-----+--------+-----------+\
+ ";
assert_eq!(pretty_print, expected);
}
_ => unreachable!(),
};
let sql = "select * from demo where ts>cast(1000000000 as timestamp)"; // use nanoseconds as where condition
@@ -718,21 +722,26 @@ mod tests {
.await
.unwrap();
match output {
- Output::Stream(stream) => {
- let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- let pretty_print = recordbatches.pretty_print();
- let pretty_print = pretty_print.lines().collect::<Vec<&str>>();
- let expected = vec![
- "+----------------+---------------------+-----+--------+-----------+",
- "| host | ts | cpu | memory | disk_util |",
- "+----------------+---------------------+-----+--------+-----------+",
- "| frontend.host2 | 1970-01-01 00:00:02 | | | 9.9 |",
- "| frontend.host3 | 1970-01-01 00:00:03 | 3.3 | 300 | 9.9 |",
- "+----------------+---------------------+-----+--------+-----------+",
- ];
- assert_eq!(pretty_print, expected);
+ Output::RecordBatches(_) => {
+ unreachable!("Output::RecordBatches")
+ }
+ Output::AffectedRows(_) => {
+ unreachable!("Output::AffectedRows")
+ }
+ Output::Stream(s) => {
+ let recordbatches = common_recordbatch::util::collect_batches(s).await.unwrap();
+ let pretty = recordbatches.pretty_print().unwrap();
+ let expected = "\
+ +----------------+---------------------+-----+--------+-----------+
+ | host | ts | cpu | memory | disk_util |
+ +----------------+---------------------+-----+--------+-----------+
+ | frontend.host2 | 1970-01-01T00:00:02 | | | 9.9 |
+ | frontend.host3 | 1970-01-01T00:00:03 | 3.3 | 300 | 9.9 |
+ +----------------+---------------------+-----+--------+-----------+\
+ "
+ .to_string();
+ assert_eq!(pretty, expected);
}
_ => unreachable!(),
};
}
@@ -791,7 +800,7 @@ mod tests {
..Default::default()
}),
semantic_type: SemanticType::Timestamp as i32,
- datatype: ColumnDataType::Timestamp as i32,
+ datatype: ColumnDataType::TimestampMillisecond as i32,
..Default::default()
};
@@ -909,7 +918,7 @@ mod tests {
},
GrpcColumnDef {
name: "ts".to_string(),
- datatype: ColumnDataType::Timestamp as i32,
+ datatype: ColumnDataType::TimestampMillisecond as i32,
is_nullable: true,
default_constraint: None,
},

View File

@@ -522,6 +522,7 @@ fn find_partition_columns(
#[cfg(test)]
mod test {
use itertools::Itertools;
+ use servers::query_handler::SqlQueryHandlerRef;
use session::context::QueryContext;
use sql::dialect::GenericDialect;
@@ -604,7 +605,9 @@ ENGINE=mito",
"| public |",
"| test_show_databases |",
"+---------------------+",
- ];
+ ]
+ .into_iter()
+ .join("\n");
let expected2 = vec![
"+---------------------+",
"| Schemas |",
@@ -612,9 +615,10 @@ ENGINE=mito",
"| test_show_databases |",
"| public |",
"+---------------------+",
- ];
- let pretty = r.pretty_print();
- let lines = pretty.lines().collect::<Vec<_>>();
+ ]
+ .into_iter()
+ .join("\n");
+ let lines = r.pretty_print().unwrap();
assert!(lines == expected1 || lines == expected2)
}
_ => unreachable!(),
@@ -654,14 +658,12 @@ ENGINE=mito",
let output = instance.do_query(sql, QueryContext::arc()).await.unwrap();
match output {
Output::RecordBatches(r) => {
- let expected = vec![
- "+--------------+",
- "| Tables |",
- "+--------------+",
- "| dist_numbers |",
- "+--------------+",
- ];
- assert_eq!(r.pretty_print().lines().collect::<Vec<_>>(), expected);
+ let expected = r#"+--------------+
+ | Tables |
+ +--------------+
+ | dist_numbers |
+ +--------------+"#;
+ assert_eq!(r.pretty_print().unwrap(), expected);
}
_ => unreachable!(),
}

View File

@@ -63,7 +63,7 @@ mod tests {
use common_query::Output;
use common_recordbatch::RecordBatches;
- use datafusion::arrow_print;
+ use itertools::Itertools;
use servers::query_handler::SqlQueryHandler;
use session::context::QueryContext;
@@ -134,22 +134,18 @@ mod tests {
match output {
Output::Stream(stream) => {
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- let recordbatches = recordbatches
- .take()
- .into_iter()
- .map(|r| r.df_recordbatch)
- .collect::<Vec<_>>();
- let pretty_print = arrow_print::write(&recordbatches);
- let pretty_print = pretty_print.lines().collect::<Vec<&str>>();
+ let pretty_print = recordbatches.pretty_print().unwrap();
let expected = vec![
"+---------------------+----------------+-------+-------+-------+",
"| greptime_timestamp | greptime_value | tagk1 | tagk2 | tagk3 |",
"+---------------------+----------------+-------+-------+-------+",
"| 1970-01-01 00:00:01 | 1 | tagv1 | tagv2 | |",
"| 1970-01-01 00:00:02 | 2 | | tagv2 | tagv3 |",
"| 1970-01-01 00:00:03 | 3 | | | |",
"| 1970-01-01T00:00:01 | 1 | tagv1 | tagv2 | |",
"| 1970-01-01T00:00:02 | 2 | | tagv2 | tagv3 |",
"| 1970-01-01T00:00:03 | 3 | | | |",
"+---------------------+----------------+-------+-------+-------+",
];
]
.into_iter()
.join("\n");
assert_eq!(pretty_print, expected);
}
_ => unreachable!(),

View File

@@ -187,9 +187,12 @@ mod tests {
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datatypes::data_type::ConcreteDataType;
- use datatypes::types::{BooleanType, StringType};
+ use datatypes::prelude::ScalarVectorBuilder;
+ use datatypes::types::StringType;
use datatypes::value::Value;
- use datatypes::vectors::VectorBuilder;
+ use datatypes::vectors::{
+ BooleanVectorBuilder, Int16VectorBuilder, MutableVector, StringVectorBuilder,
+ };
use serde::{Deserialize, Serialize};
use store_api::storage::RegionNumber;
use table::requests::InsertRequest;
@@ -351,17 +354,17 @@ mod tests {
#[test]
fn test_partition_values() {
- let mut builder = VectorBuilder::new(ConcreteDataType::Boolean(BooleanType));
- builder.push(&true.into());
- builder.push(&false.into());
- builder.push(&true.into());
- let v1 = builder.finish();
+ let mut builder = BooleanVectorBuilder::with_capacity(3);
+ builder.push(Some(true));
+ builder.push(Some(false));
+ builder.push(Some(true));
+ let v1 = builder.to_vector();
- let mut builder = VectorBuilder::new(ConcreteDataType::String(StringType));
- builder.push(&"host1".into());
- builder.push_null();
- builder.push(&"host3".into());
- let v2 = builder.finish();
+ let mut builder = StringVectorBuilder::with_capacity(3);
+ builder.push(Some("host1"));
+ builder.push(None);
+ builder.push(Some("host3"));
+ let v2 = builder.to_vector();
let vectors = vec![v1, v2];
@@ -380,23 +383,23 @@ mod tests {
fn mock_insert_request() -> InsertRequest {
let mut columns_values = HashMap::with_capacity(4);
- let mut builder = VectorBuilder::new(ConcreteDataType::Boolean(BooleanType));
- builder.push(&true.into());
- builder.push(&false.into());
- builder.push(&true.into());
- columns_values.insert("enable_reboot".to_string(), builder.finish());
+ let mut builder = BooleanVectorBuilder::with_capacity(3);
+ builder.push(Some(true));
+ builder.push(Some(false));
+ builder.push(Some(true));
+ columns_values.insert("enable_reboot".to_string(), builder.to_vector());
- let mut builder = VectorBuilder::new(ConcreteDataType::String(StringType));
- builder.push(&"host1".into());
- builder.push_null();
- builder.push(&"host3".into());
- columns_values.insert("host".to_string(), builder.finish());
+ let mut builder = StringVectorBuilder::with_capacity(3);
+ builder.push(Some("host1"));
+ builder.push(None);
+ builder.push(Some("host3"));
+ columns_values.insert("host".to_string(), builder.to_vector());
- let mut builder = VectorBuilder::new(ConcreteDataType::int16_datatype());
- builder.push(&1_i16.into());
- builder.push(&2_i16.into());
- builder.push(&3_i16.into());
- columns_values.insert("id".to_string(), builder.finish());
+ let mut builder = Int16VectorBuilder::with_capacity(3);
+ builder.push(Some(1_i16));
+ builder.push(Some(2_i16));
+ builder.push(Some(3_i16));
+ columns_values.insert("id".to_string(), builder.to_vector());
InsertRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
@@ -408,22 +411,22 @@ mod tests {
fn mock_wrong_insert_request() -> InsertRequest {
let mut columns_values = HashMap::with_capacity(4);
- let mut builder = VectorBuilder::new(ConcreteDataType::Boolean(BooleanType));
- builder.push(&true.into());
- builder.push(&false.into());
- builder.push(&true.into());
- columns_values.insert("enable_reboot".to_string(), builder.finish());
+ let mut builder = BooleanVectorBuilder::with_capacity(3);
+ builder.push(Some(true));
+ builder.push(Some(false));
+ builder.push(Some(true));
+ columns_values.insert("enable_reboot".to_string(), builder.to_vector());
- let mut builder = VectorBuilder::new(ConcreteDataType::String(StringType));
- builder.push(&"host1".into());
- builder.push_null();
- builder.push(&"host3".into());
- columns_values.insert("host".to_string(), builder.finish());
+ let mut builder = StringVectorBuilder::with_capacity(3);
+ builder.push(Some("host1"));
+ builder.push(None);
+ builder.push(Some("host3"));
+ columns_values.insert("host".to_string(), builder.to_vector());
- let mut builder = VectorBuilder::new(ConcreteDataType::int16_datatype());
- builder.push(&1_i16.into());
+ let mut builder = Int16VectorBuilder::with_capacity(1);
+ builder.push(Some(1_i16));
// two values are missing
- columns_values.insert("id".to_string(), builder.finish());
+ columns_values.insert("id".to_string(), builder.to_vector());
InsertRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
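The fixtures above all follow the typed-builder pattern that replaces the dynamic VectorBuilder: each ScalarVectorBuilder takes an Option<T> in push (None appends a null) and finishes with to_vector. A minimal sketch with the builders imported in this hunk:

// Sketch: typed builders, nulls via None, finished with to_vector().
let mut hosts = StringVectorBuilder::with_capacity(3);
hosts.push(Some("host1"));
hosts.push(None); // a null entry
hosts.push(Some("host3"));
let hosts = hosts.to_vector();

let mut ids = Int16VectorBuilder::with_capacity(3);
ids.push(Some(1_i16));
ids.push(Some(2_i16));
ids.push(Some(3_i16));
let ids = ids.to_vector();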

View File

@@ -512,18 +512,19 @@ mod test {
use api::v1::{column, Column, ColumnDataType};
use common_query::physical_plan::DfPhysicalPlanAdapter;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::util;
use datafusion::execution::context::TaskContext;
use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
use datafusion::physical_plan::expressions::{col as physical_col, PhysicalSortExpr};
use datafusion::physical_plan::sorts::sort::SortExec;
use datafusion::physical_plan::ExecutionPlan;
+ use datafusion::prelude::SessionContext;
+ use datafusion::sql::sqlparser;
use datafusion_expr::expr_fn::{and, binary_expr, col, or};
use datafusion_expr::lit;
use datanode::instance::Instance;
- use datatypes::arrow::compute::sort::SortOptions;
+ use datatypes::arrow::compute::SortOptions;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
+ use itertools::Itertools;
use meta_client::client::MetaClient;
use meta_client::rpc::router::RegionRoute;
use meta_client::rpc::{Region, Table, TableRoute};
@@ -860,6 +861,7 @@ mod test {
expected_partitions: usize,
expected_output: Vec<&str>,
) {
let expected_output = expected_output.into_iter().join("\n");
let table_scan = table
.scan(&projection, filters.as_slice(), None)
.await
@@ -878,21 +880,17 @@ mod test {
options: SortOptions::default(),
}],
Arc::new(merge),
+ None,
)
.unwrap();
assert_eq!(sort.output_partitioning().partition_count(), 1);
- let stream = sort
- .execute(0, Arc::new(RuntimeEnv::default()))
- .await
- .unwrap();
+ let session_ctx = SessionContext::new();
+ let stream = sort.execute(0, session_ctx.task_ctx()).unwrap();
let stream = Box::pin(RecordBatchStreamAdapter::try_new(stream).unwrap());
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- assert_eq!(
- recordbatches.pretty_print().lines().collect::<Vec<_>>(),
- expected_output
- );
+ assert_eq!(recordbatches.pretty_print().unwrap(), expected_output);
}
async fn new_dist_table() -> DistTable {
@@ -924,14 +922,16 @@ mod test {
PARTITION r3 VALUES LESS THAN (MAXVALUE),
)
ENGINE=mito";
- let create_table = match ParserContext::create_with_dialect(sql, &GenericDialect {})
- .unwrap()
- .pop()
- .unwrap()
- {
- Statement::CreateTable(c) => c,
- _ => unreachable!(),
- };
+ let create_table =
+ match ParserContext::create_with_dialect(sql, &sqlparser::dialect::GenericDialect {})
+ .unwrap()
+ .pop()
+ .unwrap()
+ {
+ Statement::CreateTable(c) => c,
+ _ => unreachable!(),
+ };
let mut expr = DefaultCreateExprFactory
.create_expr_by_stmt(&create_table)
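The DataFusion upgrade surfaces at two call sites above: SortExec::try_new gains a third fetch-limit argument, and execute is no longer async, taking a TaskContext obtained from a SessionContext instead of a RuntimeEnv. A sketch of the new call shape (inputs as built in the hunk):

// Sketch: new SortExec construction and execution entry point.
let sort = SortExec::try_new(
    sort_exprs,      // Vec<PhysicalSortExpr>, as above
    Arc::new(merge), // input plan
    None,            // fetch limit: None = return all rows
)
.unwrap();
let session_ctx = SessionContext::new();
let stream = sort.execute(0, session_ctx.task_ctx()).unwrap();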

View File

@@ -148,8 +148,8 @@ mod tests {
use api::v1::{ColumnDataType, InsertExpr};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
- use datatypes::prelude::ConcreteDataType;
- use datatypes::types::StringType;
+ use datatypes::prelude::ScalarVectorBuilder;
+ use datatypes::vectors::{Int16VectorBuilder, MutableVector, StringVectorBuilder};
use table::requests::InsertRequest;
use super::to_insert_expr;
@@ -166,16 +166,16 @@ mod tests {
fn mock_insert_request() -> InsertRequest {
let mut columns_values = HashMap::with_capacity(4);
- let mut builder = VectorBuilder::new(ConcreteDataType::String(StringType));
- builder.push(&"host1".into());
- builder.push_null();
- builder.push(&"host3".into());
- columns_values.insert("host".to_string(), builder.finish());
+ let mut builder = StringVectorBuilder::with_capacity(3);
+ builder.push(Some("host1"));
+ builder.push(None);
+ builder.push(Some("host3"));
+ columns_values.insert("host".to_string(), builder.to_vector());
- let mut builder = ConcreteDataType::int16_datatype().create_mutable_vector(3);
- builder.push(&1_i16.into());
- builder.push(&2_i16.into());
- builder.push(&3_i16.into());
+ let mut builder = Int16VectorBuilder::with_capacity(3);
+ builder.push(Some(1_i16));
+ builder.push(Some(2_i16));
+ builder.push(Some(3_i16));
columns_values.insert("id".to_string(), builder.to_vector());
InsertRequest {

View File

@@ -359,7 +359,7 @@ monitor2,host=host4 cpu=66.3,memory=1029 1663840496400340003";
verify_column(
&columns[3],
"ts",
- ColumnDataType::Timestamp,
+ ColumnDataType::TimestampMillisecond,
SemanticType::Timestamp,
Vec::new(),
Values {
@@ -398,7 +398,7 @@ monitor2,host=host4 cpu=66.3,memory=1029 1663840496400340003";
verify_column(
&columns[2],
"ts",
- ColumnDataType::Timestamp,
+ ColumnDataType::TimestampMillisecond,
SemanticType::Timestamp,
Vec::new(),
Values {

View File

@@ -310,90 +310,85 @@ mod test {
let output = check(query, Arc::new(QueryContext::new()));
assert!(output.is_none());
- fn test(query: &str, expected: Vec<&str>) {
+ fn test(query: &str, expected: &str) {
let output = check(query, Arc::new(QueryContext::new()));
match output.unwrap() {
Output::RecordBatches(r) => {
- assert_eq!(r.pretty_print().lines().collect::<Vec<_>>(), expected)
+ assert_eq!(&r.pretty_print().unwrap(), expected)
}
_ => unreachable!(),
}
}
let query = "select version()";
- let expected = vec![
- "+-----------+",
- "| version() |",
- "+-----------+",
- "| 8.0.26 |",
- "+-----------+",
- ];
+ let expected = "\
+ +-----------+
+ | version() |
+ +-----------+
+ | 8.0.26 |
+ +-----------+";
test(query, expected);
let query = "SELECT @@version_comment LIMIT 1";
- let expected = vec![
- "+-------------------+",
- "| @@version_comment |",
- "+-------------------+",
- "| Greptime |",
- "+-------------------+",
- ];
+ let expected = "\
+ +-------------------+
+ | @@version_comment |
+ +-------------------+
+ | Greptime |
+ +-------------------+";
test(query, expected);
// variables
let query = "select @@tx_isolation, @@session.tx_isolation";
- let expected = vec![
- "+-----------------+------------------------+",
- "| @@tx_isolation | @@session.tx_isolation |",
- "+-----------------+------------------------+",
- "| REPEATABLE-READ | REPEATABLE-READ |",
- "+-----------------+------------------------+",
- ];
+ let expected = "\
+ +-----------------+------------------------+
+ | @@tx_isolation | @@session.tx_isolation |
+ +-----------------+------------------------+
+ | REPEATABLE-READ | REPEATABLE-READ |
+ +-----------------+------------------------+";
test(query, expected);
// complex variables
let query = "/* mysql-connector-java-8.0.17 (Revision: 16a712ddb3f826a1933ab42b0039f7fb9eebc6ec) */SELECT @@session.auto_increment_increment AS auto_increment_increment, @@character_set_client AS character_set_client, @@character_set_connection AS character_set_connection, @@character_set_results AS character_set_results, @@character_set_server AS character_set_server, @@collation_server AS collation_server, @@collation_connection AS collation_connection, @@init_connect AS init_connect, @@interactive_timeout AS interactive_timeout, @@license AS license, @@lower_case_table_names AS lower_case_table_names, @@max_allowed_packet AS max_allowed_packet, @@net_write_timeout AS net_write_timeout, @@performance_schema AS performance_schema, @@sql_mode AS sql_mode, @@system_time_zone AS system_time_zone, @@time_zone AS time_zone, @@transaction_isolation AS transaction_isolation, @@wait_timeout AS wait_timeout;";
- let expected = vec![
- "+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+",
- "| auto_increment_increment | character_set_client | character_set_connection | character_set_results | character_set_server | collation_server | collation_connection | init_connect | interactive_timeout | license | lower_case_table_names | max_allowed_packet | net_write_timeout | performance_schema | sql_mode | system_time_zone | time_zone | transaction_isolation | wait_timeout; |",
- "+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+",
- "| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31536000 | 0 | 0 | 134217728 | 31536000 | 0 | 0 | UTC | UTC | REPEATABLE-READ | 31536000 |",
- "+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+",
- ];
+ let expected = "\
+ +--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+
+ | auto_increment_increment | character_set_client | character_set_connection | character_set_results | character_set_server | collation_server | collation_connection | init_connect | interactive_timeout | license | lower_case_table_names | max_allowed_packet | net_write_timeout | performance_schema | sql_mode | system_time_zone | time_zone | transaction_isolation | wait_timeout; |
+ +--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+
+ | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31536000 | 0 | 0 | 134217728 | 31536000 | 0 | 0 | UTC | UTC | REPEATABLE-READ | 31536000 |
+ +--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+";
test(query, expected);
let query = "show variables";
- let expected = vec![
- "+---------------+-------+",
- "| Variable_name | Value |",
- "+---------------+-------+",
- "| | |",
- "+---------------+-------+",
- ];
+ let expected = "\
+ +---------------+-------+
+ | Variable_name | Value |
+ +---------------+-------+
+ | | |
+ +---------------+-------+";
test(query, expected);
let query = "show variables like 'lower_case_table_names'";
- let expected = vec![
- "+------------------------+-------+",
- "| Variable_name | Value |",
- "+------------------------+-------+",
- "| lower_case_table_names | 0 |",
- "+------------------------+-------+",
- ];
+ let expected = "\
+ +------------------------+-------+
+ | Variable_name | Value |
+ +------------------------+-------+
+ | lower_case_table_names | 0 |
+ +------------------------+-------+";
test(query, expected);
let query = "show collation";
let expected = vec!["++", "++"]; // empty
let expected = "\
++
++"; // empty
test(query, expected);
let query = "SELECT TIMEDIFF(NOW(), UTC_TIMESTAMP())";
- let expected = vec![
- "+----------------------------------+",
- "| TIMEDIFF(NOW(), UTC_TIMESTAMP()) |",
- "+----------------------------------+",
- "| 00:00:00 |",
- "+----------------------------------+",
- ];
+ let expected = "\
+ +----------------------------------+
+ | TIMEDIFF(NOW(), UTC_TIMESTAMP()) |
+ +----------------------------------+
+ | 00:00:00 |
+ +----------------------------------+";
test(query, expected);
}
}

View File

@@ -33,7 +33,7 @@ use table::test_util::MemTable;
use crate::create_testing_sql_query_handler;
use crate::mysql::{all_datatype_testing_data, MysqlTextRow, TestingData};
- fn create_mysql_server(table: MemTable, tls: Arc<TlsOption>) -> Result<Box<dyn Server>> {
+ fn create_mysql_server(table: MemTable, tls: TlsOption) -> Result<Box<dyn Server>> {
let query_handler = create_testing_sql_query_handler(table);
let io_runtime = Arc::new(
RuntimeBuilder::default()
@@ -125,7 +125,7 @@ async fn test_shutdown_mysql_server() -> Result<()> {
async fn test_query_all_datatypes() -> Result<()> {
common_telemetry::init_default_ut_logging();
- let server_tls = Arc::new(TlsOption::default());
+ let server_tls = TlsOption::default();
let client_tls = false;
do_test_query_all_datatypes(server_tls, client_tls).await?;
@@ -134,11 +134,11 @@ async fn test_query_all_datatypes() -> Result<()> {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_server_prefer_secure_client_plain() -> Result<()> {
- let server_tls = Arc::new(TlsOption {
+ let server_tls = TlsOption {
mode: servers::tls::TlsMode::Prefer,
cert_path: "tests/ssl/server.crt".to_owned(),
key_path: "tests/ssl/server.key".to_owned(),
- });
+ };
let client_tls = false;
do_test_query_all_datatypes(server_tls, client_tls).await?;
@@ -147,11 +147,11 @@ async fn test_server_prefer_secure_client_plain() -> Result<()> {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_server_prefer_secure_client_secure() -> Result<()> {
- let server_tls = Arc::new(TlsOption {
+ let server_tls = TlsOption {
mode: servers::tls::TlsMode::Prefer,
cert_path: "tests/ssl/server.crt".to_owned(),
key_path: "tests/ssl/server.key".to_owned(),
- });
+ };
let client_tls = true;
do_test_query_all_datatypes(server_tls, client_tls).await?;
@@ -160,11 +160,11 @@ async fn test_server_prefer_secure_client_secure() -> Result<()> {
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_server_require_secure_client_secure() -> Result<()> {
- let server_tls = Arc::new(TlsOption {
+ let server_tls = TlsOption {
mode: servers::tls::TlsMode::Require,
cert_path: "tests/ssl/server.crt".to_owned(),
key_path: "tests/ssl/server.key".to_owned(),
- });
+ };
let client_tls = true;
do_test_query_all_datatypes(server_tls, client_tls).await?;
@@ -173,11 +173,11 @@ async fn test_server_require_secure_client_secure() -> Result<()> {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_server_required_secure_client_plain() -> Result<()> {
- let server_tls = Arc::new(TlsOption {
+ let server_tls = TlsOption {
mode: servers::tls::TlsMode::Require,
cert_path: "tests/ssl/server.crt".to_owned(),
key_path: "tests/ssl/server.key".to_owned(),
- });
+ };
let client_tls = false;
@@ -202,7 +202,7 @@ async fn test_server_required_secure_client_plain() -> Result<()> {
Ok(())
}
- async fn do_test_query_all_datatypes(server_tls: Arc<TlsOption>, client_tls: bool) -> Result<()> {
+ async fn do_test_query_all_datatypes(server_tls: TlsOption, client_tls: bool) -> Result<()> {
common_telemetry::init_default_ut_logging();
let TestingData {
column_schemas,

View File

@@ -36,7 +36,7 @@ use crate::create_testing_sql_query_handler;
fn create_postgres_server(
table: MemTable,
check_pwd: bool,
- tls: Arc<TlsOption>,
+ tls: TlsOption,
) -> Result<Box<dyn Server>> {
let query_handler = create_testing_sql_query_handler(table);
let io_runtime = Arc::new(
@@ -194,11 +194,11 @@ async fn test_query_pg_concurrently() -> Result<()> {
async fn test_server_secure_prefer_client_plain() -> Result<()> {
common_telemetry::init_default_ut_logging();
- let server_tls = Arc::new(TlsOption {
+ let server_tls = TlsOption {
mode: servers::tls::TlsMode::Prefer,
cert_path: "tests/ssl/server.crt".to_owned(),
key_path: "tests/ssl/server.key".to_owned(),
- });
+ };
let client_tls = false;
do_simple_query(server_tls, client_tls).await?;
@@ -209,11 +209,11 @@ async fn test_server_secure_prefer_client_plain() -> Result<()> {
async fn test_server_secure_require_client_plain() -> Result<()> {
common_telemetry::init_default_ut_logging();
- let server_tls = Arc::new(TlsOption {
+ let server_tls = TlsOption {
mode: servers::tls::TlsMode::Require,
cert_path: "tests/ssl/server.crt".to_owned(),
key_path: "tests/ssl/server.key".to_owned(),
- });
+ };
let server_port = start_test_server(server_tls).await?;
let r = create_plain_connection(server_port, false).await;
assert!(r.is_err());
@@ -224,11 +224,11 @@ async fn test_server_secure_require_client_plain() -> Result<()> {
async fn test_server_secure_require_client_secure() -> Result<()> {
common_telemetry::init_default_ut_logging();
- let server_tls = Arc::new(TlsOption {
+ let server_tls = TlsOption {
mode: servers::tls::TlsMode::Require,
cert_path: "tests/ssl/server.crt".to_owned(),
key_path: "tests/ssl/server.key".to_owned(),
- });
+ };
let client_tls = true;
do_simple_query(server_tls, client_tls).await?;
@@ -237,7 +237,7 @@ async fn test_server_secure_require_client_secure() -> Result<()> {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_using_db() -> Result<()> {
- let server_port = start_test_server(Arc::new(TlsOption::default())).await?;
+ let server_port = start_test_server(TlsOption::default()).await?;
let client = create_connection_with_given_db(server_port, "testdb")
.await
@@ -253,7 +253,7 @@ async fn test_using_db() -> Result<()> {
Ok(())
}
- async fn start_test_server(server_tls: Arc<TlsOption>) -> Result<u16> {
+ async fn start_test_server(server_tls: TlsOption) -> Result<u16> {
common_telemetry::init_default_ut_logging();
let table = MemTable::default_numbers_table();
let pg_server = create_postgres_server(table, false, server_tls)?;
@@ -262,7 +262,7 @@ async fn start_test_server(server_tls: Arc<TlsOption>) -> Result<u16> {
Ok(server_addr.port())
}
- async fn do_simple_query(server_tls: Arc<TlsOption>, client_tls: bool) -> Result<()> {
+ async fn do_simple_query(server_tls: TlsOption, client_tls: bool) -> Result<()> {
let server_port = start_test_server(server_tls).await?;
if !client_tls {
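As in the MySQL tests, every helper here now takes TlsOption by value, so the test configurations become plain struct literals. A sketch of the Require-mode setup these tests use:

// Sketch: a Require-mode TLS config passed by value to the test server.
let server_tls = TlsOption {
    mode: servers::tls::TlsMode::Require,
    cert_path: "tests/ssl/server.crt".to_owned(),
    key_path: "tests/ssl/server.key".to_owned(),
};
let server_port = start_test_server(server_tls).await?;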

View File

@@ -422,9 +422,13 @@ mod tests {
let sql_val = SqlValue::Boolean(true);
let v = sql_value_to_value("a", &ConcreteDataType::float64_datatype(), &sql_val);
assert!(v.is_err());
assert!(format!("{:?}", v).contains(
"column_name: \"a\", expect: Float64(Float64), actual: Boolean(BooleanType)"
));
assert!(
format!("{:?}", v).contains(
"column_name: \"a\", expect: Float64(Float64Type), actual: Boolean(BooleanType)"
),
"v is {:?}",
v
);
}
#[test]

View File

@@ -113,7 +113,7 @@ fn expect_data() -> (Column, Column, Column, Column) {
..Default::default()
}),
semantic_type: SemanticType::Timestamp as i32,
- datatype: ColumnDataType::Timestamp as i32,
+ datatype: ColumnDataType::TimestampMillisecond as i32,
..Default::default()
};
@@ -244,7 +244,7 @@ fn testing_create_expr() -> CreateExpr {
},
ColumnDef {
name: "ts".to_string(),
- datatype: 15, // timestamp
+ datatype: ColumnDataType::TimestampMillisecond as i32, // timestamp
is_nullable: true,
default_constraint: None,
},

View File

@@ -116,7 +116,7 @@ pub async fn test_sql_api(store_type: StorageType) {
assert_eq!(
output[0],
serde_json::from_value::<JsonOutput>(json!({
"records":{"schema":{"column_schemas":[{"name":"host","data_type":"String"},{"name":"cpu","data_type":"Float64"},{"name":"memory","data_type":"Float64"},{"name":"ts","data_type":"Timestamp"}]},"rows":[["host",66.6,1024.0,0]]}
"records":{"schema":{"column_schemas":[{"name":"host","data_type":"String"},{"name":"cpu","data_type":"Float64"},{"name":"memory","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[["host",66.6,1024.0,0]]}
})).unwrap()
);
@@ -138,7 +138,7 @@ pub async fn test_sql_api(store_type: StorageType) {
assert_eq!(
output[0],
serde_json::from_value::<JsonOutput>(json!({
"records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"Timestamp"}]},"rows":[[66.6,0]]}
"records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]]}
})).unwrap()
);
@@ -159,7 +159,7 @@ pub async fn test_sql_api(store_type: StorageType) {
assert_eq!(
output[0],
serde_json::from_value::<JsonOutput>(json!({
"records":{"schema":{"column_schemas":[{"name":"c","data_type":"Float64"},{"name":"time","data_type":"Timestamp"}]},"rows":[[66.6,0]]}
"records":{"schema":{"column_schemas":[{"name":"c","data_type":"Float64"},{"name":"time","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]]}
})).unwrap()
);