chore: update datafusion to 50 (#7076)
* chore: update datafusion to 50
* fix ci
* fix: update datafusion_pg_catalog import
* chore: fix toml format
* chore: fix toml format again
* fix nextest
* fix sqlness
* chore: switch datafusion-orc to upstream tag
* fix sqlness
* resolve PR comments

Signed-off-by: luofucong <luofc@foxmail.com>
Co-authored-by: Ning Sun <sunning@greptime.com>
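Most of the mechanical churn in this diff follows one recurring pattern: with DataFusion 50, the UDF wrapper types in this codebase gain PartialEq/Eq/Hash, either derived or hand-written by delegating to the function signature. A minimal sketch of that pattern, using a hypothetical `MyUdf` type that is not part of this diff (the `Signature`-based impls mirror the ones added below):

use std::hash::{Hash, Hasher};

use datafusion_expr::Signature;

#[derive(Debug)]
pub struct MyUdf {
    signature: Signature,
}

impl PartialEq for MyUdf {
    fn eq(&self, other: &Self) -> bool {
        // Equality delegates to the signature, mirroring the impls added in this diff.
        self.signature == other.signature
    }
}

impl Eq for MyUdf {}

impl Hash for MyUdf {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash the same field used by `eq` so the two stay consistent.
        self.signature.hash(state)
    }
}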
805  Cargo.lock (generated): file diff suppressed because it is too large.

66  Cargo.toml:
@@ -99,12 +99,12 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
 # See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
 ahash = { version = "0.8", features = ["compile-time-rng"] }
 aquamarine = "0.6"
-arrow = { version = "56.0", features = ["prettyprint"] }
-arrow-array = { version = "56.0", default-features = false, features = ["chrono-tz"] }
-arrow-buffer = "56.0"
-arrow-flight = "56.0"
-arrow-ipc = { version = "56.0", default-features = false, features = ["lz4", "zstd"] }
-arrow-schema = { version = "56.0", features = ["serde"] }
+arrow = { version = "56.2", features = ["prettyprint"] }
+arrow-array = { version = "56.2", default-features = false, features = ["chrono-tz"] }
+arrow-buffer = "56.2"
+arrow-flight = "56.2"
+arrow-ipc = { version = "56.2", default-features = false, features = ["lz4", "zstd"] }
+arrow-schema = { version = "56.2", features = ["serde"] }
 async-stream = "0.3"
 async-trait = "0.1"
 # Remember to update axum-extra, axum-macros when updating axum
@@ -123,18 +123,18 @@ clap = { version = "4.4", features = ["derive"] }
 config = "0.13.0"
 crossbeam-utils = "0.8"
 dashmap = "6.1"
-datafusion = "49"
-datafusion-common = "49"
-datafusion-expr = "49"
-datafusion-functions = "49"
-datafusion-functions-aggregate-common = "49"
-datafusion-optimizer = "49"
-datafusion-orc = { git = "https://github.com/GreptimeTeam/datafusion-orc", rev = "a0a5f902158f153119316eaeec868cff3fc8a99d" }
-datafusion-pg-catalog = { git = "https://github.com/datafusion-contrib/datafusion-postgres", rev = "3d1b7c7d5b82dd49bafc2803259365e633f654fa" }
-datafusion-physical-expr = "49"
-datafusion-physical-plan = "49"
-datafusion-sql = "49"
-datafusion-substrait = "49"
+datafusion = "50"
+datafusion-common = "50"
+datafusion-expr = "50"
+datafusion-functions = "50"
+datafusion-functions-aggregate-common = "50"
+datafusion-optimizer = "50"
+datafusion-orc = "0.5"
+datafusion-pg-catalog = "0.11"
+datafusion-physical-expr = "50"
+datafusion-physical-plan = "50"
+datafusion-sql = "50"
+datafusion-substrait = "50"
 deadpool = "0.12"
 deadpool-postgres = "0.14"
 derive_builder = "0.20"
@@ -180,7 +180,7 @@ otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "2
     "server",
 ] }
 parking_lot = "0.12"
-parquet = { version = "56.0", default-features = false, features = ["arrow", "async", "object_store"] }
+parquet = { version = "56.2", default-features = false, features = ["arrow", "async", "object_store"] }
 paste = "1.0"
 pin-project = "1.0"
 pretty_assertions = "1.4.0"
@@ -217,10 +217,7 @@ simd-json = "0.15"
 similar-asserts = "1.6.0"
 smallvec = { version = "1", features = ["serde"] }
 snafu = "0.8"
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "39e4fc94c3c741981f77e9d63b5ce8c02e0a27ea", features = [
-    "visitor",
-    "serde",
-] } # branch = "v0.55.x"
+sqlparser = { version = "0.58.0", default-features = false, features = ["std", "visitor", "serde"] }
 sqlx = { version = "0.8", features = [
     "runtime-tokio-rustls",
     "mysql",
@@ -322,16 +319,19 @@ git = "https://github.com/GreptimeTeam/greptime-meter.git"
 rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"

 [patch.crates-io]
-datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-physical-expr-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-datasource = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "4b519a5caa95472cc3988f5556813a583dd35af1" } # branch = "v0.58.x"

 [profile.release]
 debug = 1
@@ -27,6 +27,7 @@ use datafusion::error::DataFusionError;
 use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion_pg_catalog::pg_catalog::catalog_info::CatalogInfo;
+use datafusion_pg_catalog::pg_catalog::context::EmptyContextProvider;
 use datafusion_pg_catalog::pg_catalog::{
     PG_CATALOG_TABLES, PgCatalogSchemaProvider, PgCatalogStaticTables, PgCatalogTable,
 };
@@ -44,7 +45,7 @@ use crate::system_schema::{
 /// [`PGCatalogProvider`] is the provider for a schema named `pg_catalog`, it is not a catalog.
 pub struct PGCatalogProvider {
     catalog_name: String,
-    inner: PgCatalogSchemaProvider<CatalogManagerWrapper>,
+    inner: PgCatalogSchemaProvider<CatalogManagerWrapper, EmptyContextProvider>,
     tables: HashMap<String, TableRef>,
     table_ids: HashMap<&'static str, u32>,
 }
@@ -69,6 +70,7 @@ impl PGCatalogProvider {
                 catalog_manager,
             },
             Arc::new(static_tables),
+            EmptyContextProvider,
         )
         .expect("Failed to initialize PgCatalogSchemaProvider");
@@ -33,7 +33,7 @@ use bytes::{Buf, Bytes};
 use datafusion::datasource::physical_plan::FileOpenFuture;
 use datafusion::error::{DataFusionError, Result as DataFusionResult};
 use datafusion::physical_plan::SendableRecordBatchStream;
-use futures::StreamExt;
+use futures::{StreamExt, TryStreamExt};
 use object_store::ObjectStore;
 use snafu::ResultExt;
 use tokio_util::compat::FuturesAsyncWriteCompatExt;
@@ -179,7 +179,7 @@ pub fn open_with_decoder<T: ArrowDecoder, F: Fn() -> DataFusionResult<T>>(
             Poll::Ready(decoder.flush().transpose())
         });

-        Ok(stream.boxed())
+        Ok(stream.map_err(Into::into).boxed())
     }))
 }
@@ -22,6 +22,7 @@
 //! `foo_merge`'s input arg is the same as `foo_state`'s output, and its output is the same as `foo`'s input.
 //!

+use std::hash::{Hash, Hasher};
 use std::sync::Arc;

 use arrow::array::StructArray;
@@ -272,7 +273,7 @@ impl StateMergeHelper {
 }

 /// Wrapper to make an aggregate function out of a state function.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub struct StateWrapper {
     inner: AggregateUDF,
     name: String,
@@ -616,6 +617,20 @@ impl AggregateUDFImpl for MergeWrapper {
     }
 }

+impl PartialEq for MergeWrapper {
+    fn eq(&self, other: &Self) -> bool {
+        self.inner == other.inner
+    }
+}
+
+impl Eq for MergeWrapper {}
+
+impl Hash for MergeWrapper {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.inner.hash(state);
+    }
+}
+
 /// The merge accumulator, which modify `update_batch`'s behavior to accept one struct array which
 /// include the state fields of original aggregate function, and merge said states into original accumulator
 /// the output is the same as original aggregate function
@@ -39,8 +39,7 @@ use datafusion::prelude::SessionContext;
 use datafusion_common::arrow::array::AsArray;
 use datafusion_common::arrow::datatypes::{Float64Type, UInt64Type};
 use datafusion_common::{Column, TableReference};
-use datafusion_expr::expr::AggregateFunction;
-use datafusion_expr::sqlparser::ast::NullTreatment;
+use datafusion_expr::expr::{AggregateFunction, NullTreatment};
 use datafusion_expr::{
     Aggregate, ColumnarValue, Expr, LogicalPlan, ScalarFunctionArgs, SortExpr, TableScan, lit,
 };
@@ -68,7 +68,7 @@ impl CountHash {
     }
 }

-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Eq, PartialEq, Hash)]
 pub struct CountHash {
     signature: Signature,
 }
@@ -76,7 +76,7 @@ impl Function for GeohashFunction {
     }

     fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
-        Ok(DataType::Utf8)
+        Ok(DataType::Utf8View)
     }

     fn signature(&self) -> &Signature {
@@ -176,7 +176,7 @@ impl Function for GeohashNeighboursFunction {
         Ok(DataType::List(Arc::new(Field::new(
             "item",
             DataType::Utf8View,
-            false,
+            true,
         ))))
     }

@@ -355,9 +355,9 @@ impl Function for H3CellCenterLatLng {

     fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
         Ok(DataType::List(Arc::new(Field::new(
-            "x",
+            "item",
             DataType::Float64,
-            false,
+            true,
         ))))
     }
@@ -14,6 +14,7 @@

 use std::any::Any;
 use std::fmt::{Debug, Formatter};
+use std::hash::{Hash, Hasher};

 use datafusion::arrow::datatypes::DataType;
 use datafusion::logical_expr::{ScalarFunctionArgs, ScalarUDFImpl};
@@ -33,6 +34,20 @@ impl Debug for ScalarUdf {
     }
 }

+impl PartialEq for ScalarUdf {
+    fn eq(&self, other: &Self) -> bool {
+        self.function.signature() == other.function.signature()
+    }
+}
+
+impl Eq for ScalarUdf {}
+
+impl Hash for ScalarUdf {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.function.signature().hash(state)
+    }
+}
+
 impl ScalarUDFImpl for ScalarUdf {
     fn as_any(&self) -> &dyn Any {
         self
@@ -345,6 +345,20 @@ fn build_struct(
                 Ok(datafusion_expr::ColumnarValue::Array(result_vector.to_arrow_array()))
             }
         }
+
+        impl PartialEq for #name {
+            fn eq(&self, other: &Self) -> bool {
+                self.signature == other.signature
+            }
+        }
+
+        impl Eq for #name {}
+
+        impl std::hash::Hash for #name {
+            fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+                self.signature.hash(state)
+            }
+        }
     }
     .into()
 }
@@ -368,8 +368,7 @@ impl TryFrom<DFSchemaRef> for Schema {
     type Error = Error;

     fn try_from(value: DFSchemaRef) -> Result<Self> {
-        let s: ArrowSchema = value.as_ref().into();
-        s.try_into()
+        value.inner().clone().try_into()
     }
 }
@@ -1208,7 +1208,9 @@ impl TryFrom<ScalarValue> for Value {
                     .collect::<Result<Vec<Value>>>()?;
                 Value::Struct(StructValue::try_new(items, struct_type)?)
             }
-            ScalarValue::Decimal256(_, _, _)
+            ScalarValue::Decimal32(_, _, _)
+            | ScalarValue::Decimal64(_, _, _)
+            | ScalarValue::Decimal256(_, _, _)
             | ScalarValue::FixedSizeList(_)
             | ScalarValue::LargeList(_)
             | ScalarValue::Dictionary(_, _)
@@ -245,7 +245,9 @@ impl Helper {
                     length,
                 )
             }
-            ScalarValue::Decimal256(_, _, _)
+            ScalarValue::Decimal32(_, _, _)
+            | ScalarValue::Decimal64(_, _, _)
+            | ScalarValue::Decimal256(_, _, _)
             | ScalarValue::FixedSizeList(_)
             | ScalarValue::LargeList(_)
             | ScalarValue::Dictionary(_, _)
@@ -427,7 +427,7 @@ fn expand_tumble_analyzer(

 /// This is a placeholder for tumble_start and tumble_end function, so that datafusion can
 /// recognize them as scalar function
-#[derive(Debug)]
+#[derive(Debug, PartialEq, Eq, Hash)]
 pub struct TumbleExpand {
     signature: Signature,
     name: String,
@@ -979,11 +979,10 @@ pub fn to_create_flow_task_expr(
     query_ctx: &QueryContextRef,
 ) -> Result<CreateFlowExpr> {
     // retrieve sink table name
-    let sink_table_ref =
-        object_name_to_table_reference(create_flow.sink_table_name.clone().into(), true)
-            .with_context(|_| ConvertIdentifierSnafu {
-                ident: create_flow.sink_table_name.to_string(),
-            })?;
+    let sink_table_ref = object_name_to_table_reference(create_flow.sink_table_name.clone(), true)
+        .with_context(|_| ConvertIdentifierSnafu {
+            ident: create_flow.sink_table_name.to_string(),
+        })?;
     let catalog = sink_table_ref
         .catalog()
         .unwrap_or(query_ctx.current_catalog())
@@ -1001,9 +1000,11 @@ pub fn to_create_flow_task_expr(

     let source_table_names = extract_tables_from_query(&create_flow.query)
         .map(|name| {
-            let reference = object_name_to_table_reference(name.clone().into(), true)
-                .with_context(|_| ConvertIdentifierSnafu {
-                    ident: name.to_string(),
+            let reference =
+                object_name_to_table_reference(name.clone(), true).with_context(|_| {
+                    ConvertIdentifierSnafu {
+                        ident: name.to_string(),
+                    }
                 })?;
             let catalog = reference
                 .catalog()
@@ -123,7 +123,7 @@ impl EmptyMetric {
                 physical_planner.create_physical_expr(expr, &self.time_index_schema, session_state)
             })
             .transpose()?;
-        let result_schema: SchemaRef = Arc::new(self.result_schema.as_ref().into());
+        let result_schema: SchemaRef = self.result_schema.inner().clone();
         let properties = Arc::new(PlanProperties::new(
             EquivalenceProperties::new(result_schema.clone()),
             Partitioning::UnknownPartitioning(1),
@@ -134,7 +134,7 @@ impl EmptyMetric {
             start: self.start,
             end: self.end,
             interval: self.interval,
-            time_index_schema: Arc::new(self.time_index_schema.as_ref().into()),
+            time_index_schema: self.time_index_schema.inner().clone(),
             result_schema,
             expr: physical_expr,
             properties,
@@ -181,7 +181,7 @@ impl HistogramFold {
             .index_of_column_by_name(None, &self.ts_column)
             .unwrap();

-        let output_schema: SchemaRef = Arc::new(self.output_schema.as_ref().into());
+        let output_schema: SchemaRef = self.output_schema.inner().clone();
         let properties = PlanProperties::new(
             EquivalenceProperties::new(output_schema.clone()),
             Partitioning::UnknownPartitioning(1),
@@ -805,14 +805,13 @@ mod test {
     async fn fold_overall() {
         let memory_exec = Arc::new(prepare_test_data());
         let output_schema: SchemaRef = Arc::new(
-            (*HistogramFold::convert_schema(
+            HistogramFold::convert_schema(
                 &Arc::new(memory_exec.schema().to_dfschema().unwrap()),
                 "le",
             )
             .unwrap()
-            .as_ref())
-            .clone()
-            .into(),
+            .as_arrow()
+            .clone(),
         );
         let properties = PlanProperties::new(
             EquivalenceProperties::new(output_schema.clone()),
@@ -167,7 +167,7 @@ impl RangeManipulate {
     }

     pub fn to_execution_plan(&self, exec_input: Arc<dyn ExecutionPlan>) -> Arc<dyn ExecutionPlan> {
-        let output_schema: SchemaRef = SchemaRef::new(self.output_schema.as_ref().into());
+        let output_schema: SchemaRef = self.output_schema.inner().clone();
         let properties = exec_input.properties();
         let properties = PlanProperties::new(
             EquivalenceProperties::new(output_schema.clone()),
@@ -791,8 +791,8 @@ mod test {
                 &field_columns,
             )
             .unwrap()
-            .as_ref()
-            .into(),
+            .as_arrow()
+            .clone(),
         );
         let properties = PlanProperties::new(
             EquivalenceProperties::new(manipulate_output_schema.clone()),
@@ -92,7 +92,7 @@ impl UnionDistinctOn {
         left_exec: Arc<dyn ExecutionPlan>,
         right_exec: Arc<dyn ExecutionPlan>,
     ) -> Arc<dyn ExecutionPlan> {
-        let output_schema: SchemaRef = Arc::new(self.output_schema.as_ref().into());
+        let output_schema: SchemaRef = self.output_schema.inner().clone();
         let properties = Arc::new(PlanProperties::new(
             EquivalenceProperties::new(output_schema.clone()),
             Partitioning::UnknownPartitioning(1),
@@ -54,6 +54,7 @@ meter-core.workspace = true
 meter-macros.workspace = true
 object-store.workspace = true
 once_cell.workspace = true
+parking_lot.workspace = true
 partition.workspace = true
 prometheus.workspace = true
 promql.workspace = true
@@ -18,6 +18,7 @@ use std::sync::Arc;
 use common_function::aggrs::aggr_wrapper::{StateMergeHelper, is_all_aggr_exprs_steppable};
 use common_telemetry::debug;
 use datafusion::error::Result as DfResult;
+use datafusion_common::tree_node::{TreeNode, TreeNodeRecursion};
 use datafusion_expr::{Expr, LogicalPlan, UserDefinedLogicalNode};
 use promql::extension_plan::{
     EmptyMetric, InstantManipulate, RangeManipulate, SeriesDivide, SeriesNormalize,
@@ -93,6 +94,12 @@ impl Categorizer {
         plan: &LogicalPlan,
         partition_cols: Option<AliasMapping>,
     ) -> DfResult<Commutativity> {
+        // Subquery is treated separately in `inspect_plan_with_subquery`. To avoid rewrite the
+        // "maybe rewritten" plan, stop the check here.
+        if has_subquery(plan)? {
+            return Ok(Commutativity::Unimplemented);
+        }
+
         let partition_cols = partition_cols.unwrap_or_default();

         let comm = match plan {
@@ -331,6 +338,24 @@ pub fn partial_commutative_transformer(plan: &LogicalPlan) -> Option<LogicalPlan
     Some(plan.clone())
 }

+fn has_subquery(plan: &LogicalPlan) -> DfResult<bool> {
+    let mut found = false;
+    plan.apply_expressions(|e| {
+        e.apply(|x| {
+            if matches!(
+                x,
+                Expr::Exists(_) | Expr::InSubquery(_) | Expr::ScalarSubquery(_)
+            ) {
+                found = true;
+                Ok(TreeNodeRecursion::Stop)
+            } else {
+                Ok(TreeNodeRecursion::Continue)
+            }
+        })
+    })?;
+    Ok(found)
+}
+
 #[cfg(test)]
 mod test {
     use datafusion_expr::{LogicalPlanBuilder, Sort};
@@ -163,7 +163,7 @@ impl ExtensionPlanner for DistExtensionPlanner {
         };

         // TODO(ruihang): generate different execution plans for different variant merge operation
-        let schema = optimized_plan.schema().as_ref().into();
+        let schema = optimized_plan.schema().as_arrow();
         let query_ctx = session_state
             .config()
             .get_extension()
@@ -173,7 +173,7 @@ impl ExtensionPlanner for DistExtensionPlanner {
             table_name,
             regions,
             input_plan.clone(),
-            &schema,
+            schema,
             self.region_query_handler.clone(),
             query_ctx,
             session_state.config().target_partitions(),
@@ -196,9 +196,9 @@ fn fetch_partition_range(input: Arc<dyn ExecutionPlan>) -> DataFusionResult<Opti
         // TODO(discord9): do this in logical plan instead as it's lessy bugy there
         // Collects alias of the time index column.
         if let Some(projection) = plan.as_any().downcast_ref::<ProjectionExec>() {
-            for (expr, output_name) in projection.expr() {
-                if let Some(column_expr) = expr.as_any().downcast_ref::<PhysicalColumn>() {
-                    alias_map.push((column_expr.name().to_string(), output_name.clone()));
+            for expr in projection.expr() {
+                if let Some(column_expr) = expr.expr.as_any().downcast_ref::<PhysicalColumn>() {
+                    alias_map.push((column_expr.name().to_string(), expr.alias.clone()));
                 }
             }
             // resolve alias properly
@@ -33,11 +33,14 @@ use datafusion::execution::{RecordBatchStream, TaskContext};
 use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
 use datafusion::physical_plan::{
     DisplayAs, DisplayFormatType, ExecutionPlan, ExecutionPlanProperties, PlanProperties, TopK,
+    TopKDynamicFilters,
 };
 use datafusion_common::{DataFusionError, internal_err};
 use datafusion_physical_expr::PhysicalSortExpr;
+use datafusion_physical_expr::expressions::{DynamicFilterPhysicalExpr, lit};
 use futures::{Stream, StreamExt};
 use itertools::Itertools;
+use parking_lot::RwLock;
 use snafu::location;
 use store_api::region_engine::PartitionRange;

@@ -239,6 +242,9 @@ impl PartSortStream {
         partition: usize,
     ) -> datafusion_common::Result<Self> {
         let buffer = if let Some(limit) = limit {
+            let filter = Arc::new(RwLock::new(TopKDynamicFilters::new(Arc::new(
+                DynamicFilterPhysicalExpr::new(vec![], lit(true)),
+            ))));
             PartSortBuffer::Top(
                 TopK::try_new(
                     partition,
@@ -249,7 +255,7 @@ impl PartSortStream {
                     context.session_config().batch_size(),
                     context.runtime_env(),
                     &sort.metrics,
-                    None,
+                    filter,
                 )?,
                 0,
             )
@@ -497,6 +503,9 @@ impl PartSortStream {

     /// Internal method for sorting `Top` buffer (with limit).
     fn sort_top_buffer(&mut self) -> datafusion_common::Result<DfRecordBatch> {
+        let filter = Arc::new(RwLock::new(TopKDynamicFilters::new(Arc::new(
+            DynamicFilterPhysicalExpr::new(vec![], lit(true)),
+        ))));
         let new_top_buffer = TopK::try_new(
             self.partition,
             self.schema().clone(),
@@ -506,7 +515,7 @@ impl PartSortStream {
             self.context.session_config().batch_size(),
             self.context.runtime_env(),
             &self.root_metrics,
-            None,
+            filter,
         )?;
         let PartSortBuffer::Top(top_k, _) =
             std::mem::replace(&mut self.buffer, PartSortBuffer::Top(new_top_buffer, 0))
@@ -14,6 +14,7 @@

 use std::any::Any;
 use std::borrow::Cow;
+use std::str::FromStr;
 use std::sync::Arc;

 use async_trait::async_trait;
@@ -116,9 +117,10 @@ impl DfLogicalPlanner {

         // default to configuration value
         let options = self.session_state.config().options();
-        let format = format.as_ref().unwrap_or(&options.explain.format);
-
-        let format: ExplainFormat = format.parse()?;
+        let format = format
+            .map(|x| ExplainFormat::from_str(&x))
+            .transpose()?
+            .unwrap_or_else(|| options.explain.format.clone());

         Ok(LogicalPlan::Explain(Explain {
             verbose,
@@ -208,8 +210,7 @@ impl DfLogicalPlanner {
         let Statement::Query(query) = stmt.into_owned() else {
             unreachable!("is_tql_cte should only be true for Query statements");
         };
-        let sqlparser_stmt =
-            datafusion::sql::sqlparser::ast::Statement::Query(Box::new(query.inner.into()));
+        let sqlparser_stmt = sqlparser::ast::Statement::Query(Box::new(query.inner));
         sql_to_rel
             .sql_statement_to_plan_with_context(sqlparser_stmt, &mut planner_context)
             .context(PlanSqlSnafu)?
@@ -261,7 +262,7 @@ impl DfLogicalPlanner {

         let sql_to_rel = SqlToRel::new_with_options(&context_provider, parser_options);

-        Ok(sql_to_rel.sql_to_expr(sql.into(), schema, &mut PlannerContext::new())?)
+        Ok(sql_to_rel.sql_to_expr(sql, schema, &mut PlannerContext::new())?)
     }

     #[tracing::instrument(skip_all)]
@@ -2464,6 +2464,7 @@ impl PromPlanner {
                     window_frame: WindowFrame::new(Some(true)),
                     null_treatment: None,
                     distinct: false,
+                    filter: None,
                 },
             }))
         })
@@ -244,6 +244,9 @@ fn parse_expr_list(args: &[Expr], start: usize, len: usize) -> DFResult<Vec<Expr
                 | Expr::BinaryExpr(_)
                 | Expr::ScalarFunction(_),
         ) => args[i].clone(),
+        Some(Expr::Alias(alias)) if matches!(*alias.expr, Expr::ScalarFunction(_)) => {
+            args[i].clone()
+        }
         other => {
             return Err(dispose_parse_error(*other));
         }
@@ -208,7 +208,7 @@ pub async fn sql_format(

     let mut parts: Vec<String> = Vec::with_capacity(stmts.len());
     for stmt in stmts {
-        let mut s = format!("{:#}", stmt);
+        let mut s = format!("{stmt}");
         if !s.trim_end().ends_with(';') {
             s.push(';');
         }
@@ -25,7 +25,10 @@ pub trait ObjectNamePartExt {

 impl ObjectNamePartExt for ObjectNamePart {
     fn to_string_unquoted(&self) -> String {
-        let ObjectNamePart::Identifier(ident) = self;
+        let ObjectNamePart::Identifier(ident) = self else {
+            // If it's not an ident, just return it as a string.
+            return self.to_string();
+        };
         ident.value.clone()
     }
 }
@@ -14,15 +14,15 @@

 use std::str::FromStr;

-use snafu::ResultExt;
-use sqlparser::ast::{Ident, ObjectNamePart, Query, Value};
+use snafu::{OptionExt, ResultExt};
+use sqlparser::ast::{Ident, Query, Value};
 use sqlparser::dialect::Dialect;
 use sqlparser::keywords::Keyword;
 use sqlparser::parser::{Parser, ParserError, ParserOptions};
 use sqlparser::tokenizer::{Token, TokenWithSpan};

 use crate::ast::{Expr, ObjectName};
-use crate::error::{self, Result, SyntaxSnafu};
+use crate::error::{self, InvalidSqlSnafu, Result, SyntaxSnafu};
 use crate::parsers::tql_parser;
 use crate::statements::kill::Kill;
 use crate::statements::statement::Statement;
@@ -106,7 +106,7 @@ impl ParserContext<'_> {
                 expected: "a table name",
                 actual: self.parser.peek_token().to_string(),
             })?;
-        Ok(Self::canonicalize_object_name(raw_table_name))
+        Self::canonicalize_object_name(raw_table_name)
     }

     pub fn parse_function(sql: &str, dialect: &dyn Dialect) -> Result<Expr> {
@@ -303,17 +303,20 @@ impl ParserContext<'_> {
     }

     /// Like [canonicalize_identifier] but for [ObjectName].
-    pub fn canonicalize_object_name(object_name: ObjectName) -> ObjectName {
+    pub(crate) fn canonicalize_object_name(object_name: ObjectName) -> Result<ObjectName> {
         object_name
             .0
             .into_iter()
             .map(|x| {
-                let ObjectNamePart::Identifier(ident) = x;
-                ident
+                x.as_ident()
+                    .cloned()
+                    .map(Self::canonicalize_identifier)
+                    .with_context(|| InvalidSqlSnafu {
+                        msg: format!("not an ident: '{x}'"),
+                    })
             })
-            .map(Self::canonicalize_identifier)
-            .collect::<Vec<_>>()
-            .into()
+            .collect::<Result<Vec<_>>>()
+            .map(Into::into)
     }

     /// Simply a shortcut for sqlparser's same name method `parse_object_name`,
@@ -68,7 +68,7 @@ impl ParserContext<'_> {
             .parser
             .parse_object_name(false)
             .context(error::SyntaxSnafu)?;
-        let database_name = Self::canonicalize_object_name(database_name);
+        let database_name = Self::canonicalize_object_name(database_name)?;

         match self.parser.peek_token().token {
             Token::Word(w) => {
@@ -117,7 +117,7 @@ impl ParserContext<'_> {
             .parser
             .parse_object_name(false)
             .context(error::SyntaxSnafu)?;
-        let table_name = Self::canonicalize_object_name(raw_table_name);
+        let table_name = Self::canonicalize_object_name(raw_table_name)?;

         let alter_operation = match self.parser.peek_token().token {
             Token::Word(w) => {
@@ -145,7 +145,7 @@ impl ParserContext<'_> {
                 let new_table_name_obj_raw =
                     self.parse_object_name().context(error::SyntaxSnafu)?;
                 let new_table_name_obj =
-                    Self::canonicalize_object_name(new_table_name_obj_raw);
+                    Self::canonicalize_object_name(new_table_name_obj_raw)?;
                 let new_table_name = match &new_table_name_obj.0[..] {
                     [table] => table.to_string_unquoted(),
                     _ => {

@@ -104,7 +104,7 @@ impl ParserContext<'_> {
                 expected: "a table name",
                 actual: self.peek_token_as_string(),
             })?;
-        let table_name = Self::canonicalize_object_name(raw_table_name);
+        let table_name = Self::canonicalize_object_name(raw_table_name)?;

         if self.parser.parse_keyword(Keyword::TO) {
             let (with, connection, location, limit) = self.parse_copy_parameters()?;

@@ -196,7 +196,7 @@ impl<'a> ParserContext<'a> {
                 expected: "a database name",
                 actual: self.peek_token_as_string(),
             })?;
-        let database_name = Self::canonicalize_object_name(database_name);
+        let database_name = Self::canonicalize_object_name(database_name)?;

         let options = self
             .parser
@@ -2435,8 +2435,7 @@ non TIMESTAMP(6) TIME INDEX,
         let sql = "CREATE VIEW test AS DELETE from demo";
         let result =
             ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default());
-        assert!(result.is_err());
-        assert_matches!(result, Err(crate::error::Error::Syntax { .. }));
+        assert!(result.is_ok_and(|x| x.len() == 1));
     }

     #[test]
@@ -51,7 +51,7 @@ impl ParserContext<'_> {
         let query_stmt = self.parse_query()?;
         match query_stmt {
             Statement::Query(query) => Ok(Statement::DeclareCursor(DeclareCursor {
-                cursor_name: ParserContext::canonicalize_object_name(cursor_name),
+                cursor_name: ParserContext::canonicalize_object_name(cursor_name)?,
                 query,
             })),
             _ => error::InvalidSqlSnafu {
@@ -78,7 +78,7 @@ impl ParserContext<'_> {
             .context(error::SyntaxSnafu)?;

         Ok(Statement::FetchCursor(FetchCursor {
-            cursor_name: ParserContext::canonicalize_object_name(cursor_name),
+            cursor_name: ParserContext::canonicalize_object_name(cursor_name)?,
             fetch_size,
         }))
     }
@@ -91,7 +91,7 @@ impl ParserContext<'_> {
             .context(error::SyntaxSnafu)?;

         Ok(Statement::CloseCursor(CloseCursor {
-            cursor_name: ParserContext::canonicalize_object_name(cursor_name),
+            cursor_name: ParserContext::canonicalize_object_name(cursor_name)?,
         }))
     }
 }
@@ -36,7 +36,7 @@ impl ParserContext<'_> {
                 expected: "a table name",
                 actual: self.peek_token_as_string(),
             })?;
-        let table_idents = Self::canonicalize_object_name(raw_table_idents);
+        let table_idents = Self::canonicalize_object_name(raw_table_idents)?;
         ensure!(
             !table_idents.0.is_empty(),
             InvalidTableNameSnafu {

@@ -58,7 +58,7 @@ impl ParserContext<'_> {
                 expected: "a trigger name",
                 actual: self.peek_token_as_string(),
             })?;
-        let trigger_ident = Self::canonicalize_object_name(raw_trigger_ident);
+        let trigger_ident = Self::canonicalize_object_name(raw_trigger_ident)?;
         ensure!(
             !trigger_ident.0.is_empty(),
             error::InvalidTriggerNameSnafu {
@@ -82,7 +82,7 @@ impl ParserContext<'_> {
                 expected: "a view name",
                 actual: self.peek_token_as_string(),
             })?;
-        let view_ident = Self::canonicalize_object_name(raw_view_ident);
+        let view_ident = Self::canonicalize_object_name(raw_view_ident)?;
         ensure!(
             !view_ident.0.is_empty(),
             InvalidTableNameSnafu {
@@ -106,7 +106,7 @@ impl ParserContext<'_> {
                 expected: "a flow name",
                 actual: self.peek_token_as_string(),
             })?;
-        let flow_ident = Self::canonicalize_object_name(raw_flow_ident);
+        let flow_ident = Self::canonicalize_object_name(raw_flow_ident)?;
         ensure!(
             !flow_ident.0.is_empty(),
             InvalidFlowNameSnafu {
@@ -129,7 +129,7 @@ impl ParserContext<'_> {
                 expected: "a table name",
                 actual: self.peek_token_as_string(),
             })?;
-        let table_ident = Self::canonicalize_object_name(raw_table_ident);
+        let table_ident = Self::canonicalize_object_name(raw_table_ident)?;
         ensure!(
             !table_ident.0.is_empty(),
             InvalidTableNameSnafu {
@@ -155,7 +155,7 @@ impl ParserContext<'_> {
                 expected: "a database name",
                 actual: self.peek_token_as_string(),
             })?;
-        let database_name = Self::canonicalize_object_name(database_name);
+        let database_name = Self::canonicalize_object_name(database_name)?;

         Ok(Statement::DropDatabase(DropDatabase::new(
             database_name,
@@ -73,6 +73,7 @@ mod tests {
             projection: vec![sqlparser::ast::SelectItem::Wildcard(
                 WildcardAdditionalOptions::default(),
             )],
+            exclude: None,
            into: None,
             from: vec![sqlparser::ast::TableWithJoins {
                 relation: sqlparser::ast::TableFactor::Table {
@@ -112,9 +113,8 @@ mod tests {
             with: None,
             body: Box::new(sqlparser::ast::SetExpr::Select(Box::new(select))),
             order_by: None,
-            limit: None,
-            limit_by: vec![],
-            offset: None,
+            limit_clause: None,
+            pipe_operators: vec![],
             fetch: None,
             locks: vec![],
             for_clause: None,
@@ -13,7 +13,7 @@
 // limitations under the License.

 use snafu::ResultExt;
-use sqlparser::ast::Statement as SpStatement;
+use sqlparser::ast::{Set, Statement as SpStatement};

 use crate::ast::{Ident, ObjectName};
 use crate::error::{self, Result};
@@ -27,21 +27,27 @@ impl ParserContext<'_> {
         let _ = self.parser.next_token();
         let spstatement = self.parser.parse_set().context(error::SyntaxSnafu)?;
         match spstatement {
-            SpStatement::SetVariable {
-                variables,
-                value,
-                hivevar,
-                ..
-            } if !hivevar => Ok(Statement::SetVariables(SetVariables {
-                variable: (*variables)[0].clone(),
-                value,
-            })),
+            SpStatement::Set(set) => match set {
+                Set::SingleAssignment {
+                    scope: _,
+                    hivevar,
+                    variable,
+                    values,
+                } if !hivevar => Ok(Statement::SetVariables(SetVariables {
+                    variable,
+                    value: values,
+                })),

-            SpStatement::SetTimeZone { value, .. } => Ok(Statement::SetVariables(SetVariables {
-                variable: ObjectName::from(vec![Ident::new("TIMEZONE")]),
-                value: vec![value],
-            })),
+                Set::SetTimeZone { local: _, value } => Ok(Statement::SetVariables(SetVariables {
+                    variable: ObjectName::from(vec![Ident::new("TIMEZONE")]),
+                    value: vec![value],
+                })),

+                set => error::UnsupportedSnafu {
+                    keyword: set.to_string(),
+                }
+                .fail(),
+            },
             unexp => error::UnsupportedSnafu {
                 keyword: unexp.to_string(),
             }
@@ -148,7 +148,7 @@ impl ParserContext<'_> {
                 expected: "a database name",
                 actual: self.peek_token_as_string(),
             })?;
-        let database_name = Self::canonicalize_object_name(raw_database_name);
+        let database_name = Self::canonicalize_object_name(raw_database_name)?;
         ensure!(
             !database_name.0.is_empty(),
             InvalidDatabaseNameSnafu {
@@ -168,7 +168,7 @@ impl ParserContext<'_> {
                 expected: "a table name",
                 actual: self.peek_token_as_string(),
             })?;
-        let table_name = Self::canonicalize_object_name(raw_table_name);
+        let table_name = Self::canonicalize_object_name(raw_table_name)?;
         ensure!(
             !table_name.0.is_empty(),
             InvalidTableNameSnafu {
@@ -197,7 +197,7 @@ impl ParserContext<'_> {
                 expected: "a flow name",
                 actual: self.peek_token_as_string(),
             })?;
-        let flow_name = Self::canonicalize_object_name(raw_flow_name);
+        let flow_name = Self::canonicalize_object_name(raw_flow_name)?;
         ensure!(
             !flow_name.0.is_empty(),
             InvalidFlowNameSnafu {
@@ -214,7 +214,7 @@ impl ParserContext<'_> {
                 expected: "a view name",
                 actual: self.peek_token_as_string(),
             })?;
-        let view_name = Self::canonicalize_object_name(raw_view_name);
+        let view_name = Self::canonicalize_object_name(raw_view_name)?;
         ensure!(
             !view_name.0.is_empty(),
             InvalidTableNameSnafu {
@@ -241,7 +241,7 @@ impl ParserContext<'_> {
         );

         // Safety: already checked above
-        Ok(Self::canonicalize_object_name(table_name).0[0].to_string_unquoted())
+        Ok(Self::canonicalize_object_name(table_name)?.0[0].to_string_unquoted())
     }

     fn parse_db_name(&mut self) -> Result<Option<String>> {
@@ -262,7 +262,7 @@ impl ParserContext<'_> {

         // Safety: already checked above
         Ok(Some(
-            Self::canonicalize_object_name(db_name).0[0].to_string_unquoted(),
+            Self::canonicalize_object_name(db_name)?.0[0].to_string_unquoted(),
         ))
     }
@@ -20,7 +20,7 @@ impl ParserContext<'_> {
             actual: self.peek_token_as_string(),
         })?;

-        let trigger_name = Self::canonicalize_object_name(trigger_name);
+        let trigger_name = Self::canonicalize_object_name(trigger_name)?;

         ensure!(
             !trigger_name.0.is_empty(),
@@ -33,7 +33,7 @@ impl ParserContext<'_> {
             expected: "a table name",
             actual: self.peek_token_as_string(),
         })?;
-        let table_ident = Self::canonicalize_object_name(raw_table_ident);
+        let table_ident = Self::canonicalize_object_name(raw_table_ident)?;

         ensure!(
             !table_ident.0.is_empty(),
@@ -75,7 +75,7 @@ pub fn parser_expr_to_scalar_value_literal(
     // 1. convert parser expr to logical expr
     let empty_df_schema = DFSchema::empty();
     let logical_expr = SqlToRel::new(&StubContextProvider::default())
-        .sql_to_expr(expr.into(), &empty_df_schema, &mut Default::default())
+        .sql_to_expr(expr, &empty_df_schema, &mut Default::default())
         .context(ConvertToLogicalExpressionSnafu)?;

     struct FindNow {
@@ -310,6 +310,6 @@ impl TryFrom<&Statement> for DfStatement {
                 .fail();
             }
         };
-        Ok(DfStatement::Statement(Box::new(s.into())))
+        Ok(DfStatement::Statement(Box::new(s)))
     }
 }
@@ -194,7 +194,7 @@ fn extract_tables_from_set_expr(set_expr: &SetExpr, names: &mut HashSet<ObjectNa
             extract_tables_from_set_expr(left, names);
             extract_tables_from_set_expr(right, names);
         }
-        SetExpr::Values(_) | SetExpr::Insert(_) | SetExpr::Update(_) | SetExpr::Table(_) => {}
+        _ => {}
     };
 }
@@ -662,7 +662,7 @@ pub async fn test_http_sql_slow_query(store_type: StorageType) {
     let (app, mut guard) = setup_test_http_app_with_frontend(store_type, "sql_api").await;
     let client = TestClient::new(app).await;

-    let slow_query = "WITH RECURSIVE slow_cte AS (SELECT 1 AS n, md5(CAST(random() AS STRING)) AS hash UNION ALL SELECT n + 1, md5(concat(hash, n)) FROM slow_cte WHERE n < 4500) SELECT COUNT(*) FROM slow_cte";
+    let slow_query = "SELECT count(*) FROM generate_series(1, 1000000000)";
     let encoded_slow_query = encode(slow_query);

     let query_params = format!("/v1/sql?sql={encoded_slow_query}");
@@ -1152,12 +1152,12 @@ pub async fn test_prom_http_api(store_type: StorageType) {
     // query `__name__` without match[]
     // create a physical table and a logical table
     let res = client
-        .get("/v1/sql?sql=create table physical_table (`ts` timestamp time index, message string) with ('physical_metric_table' = 'true');")
+        .get("/v1/sql?sql=create table physical_table (`ts` timestamp time index, `message` string) with ('physical_metric_table' = 'true');")
         .send()
        .await;
     assert_eq!(res.status(), StatusCode::OK, "{:?}", res.text().await);
     let res = client
-        .get("/v1/sql?sql=create table logic_table (`ts` timestamp time index, message string) with ('on_physical_table' = 'physical_table');")
+        .get("/v1/sql?sql=create table logic_table (`ts` timestamp time index, `message` string) with ('on_physical_table' = 'physical_table');")
         .send()
         .await;
     assert_eq!(res.status(), StatusCode::OK, "{:?}", res.text().await);
@@ -5297,7 +5297,7 @@ pub async fn test_log_query(store_type: StorageType) {

     // prepare data with SQL API
     let res = client
-        .get("/v1/sql?sql=create table logs (`ts` timestamp time index, message string);")
+        .get("/v1/sql?sql=create table logs (`ts` timestamp time index, `message` string);")
         .send()
         .await;
     assert_eq!(res.status(), StatusCode::OK, "{:?}", res.text().await);
@@ -708,7 +708,7 @@ pub async fn test_mysql_slow_query(store_type: StorageType) {
         .unwrap();

     // The slow query will run at least longer than 1s.
-    let slow_query = "WITH RECURSIVE slow_cte AS (SELECT 1 AS n, md5(CAST(random() AS STRING)) AS hash UNION ALL SELECT n + 1, md5(concat(hash, n)) FROM slow_cte WHERE n < 4500) SELECT COUNT(*) FROM slow_cte";
+    let slow_query = "SELECT count(*) FROM generate_series(1, 1000000000)";

     // Simulate a slow query.
     sqlx::query(slow_query).fetch_all(&pool).await.unwrap();
@@ -823,7 +823,7 @@ pub async fn test_postgres_slow_query(store_type: StorageType) {
         .await
         .unwrap();

-    let slow_query = "WITH RECURSIVE slow_cte AS (SELECT 1 AS n, md5(CAST(random() AS STRING)) AS hash UNION ALL SELECT n + 1, md5(concat(hash, n)) FROM slow_cte WHERE n < 4500) SELECT COUNT(*) FROM slow_cte";
+    let slow_query = "SELECT count(*) FROM generate_series(1, 1000000000)";
     let _ = sqlx::query(slow_query).fetch_all(&pool).await.unwrap();

     // Wait for the slow query to be recorded.
@@ -203,13 +203,13 @@ SELECT * FROM (SELECT i1.i AS a, i2.i AS b, row_number() OVER (ORDER BY i1.i, i2
 -- Align the result to PostgreSQL: empty.
 EXPLAIN SELECT * FROM (SELECT 0=1 AS cond FROM integers i1, integers i2) a1 WHERE cond ORDER BY 1;

-+---------------+---------------+
-| plan_type     | plan          |
-+---------------+---------------+
-| logical_plan  | EmptyRelation |
-| physical_plan | EmptyExec     |
-|               |               |
-+---------------+---------------+
++---------------+-----------------------+
+| plan_type     | plan                  |
++---------------+-----------------------+
+| logical_plan  | EmptyRelation: rows=0 |
+| physical_plan | EmptyExec             |
+|               |                       |
++---------------+-----------------------+

 -- Align the result to PostgreSQL: empty.
 SELECT * FROM (SELECT 0=1 AS cond FROM integers i1, integers i2 GROUP BY 1) a1 WHERE cond ORDER BY 1;
@@ -10,13 +10,12 @@ SELECT corr(NULL,NULL);
 +-----------------+

 -- Single value returns NULL
--- FIXME(dennis): datafusion returns 0.0 here, should be NULL
 SELECT corr(1,1);

 +-------------------------+
 | corr(Int64(1),Int64(1)) |
 +-------------------------+
-| 0.0                     |
+|                         |
 +-------------------------+

 -- Test with table

@@ -5,7 +5,6 @@
 SELECT corr(NULL,NULL);

 -- Single value returns NULL
--- FIXME(dennis): datafusion returns 0.0 here, should be NULL
 SELECT corr(1,1);

 -- Test with table
@@ -10,55 +10,55 @@ Affected Rows: 6

 SELECT stddev_samp(1);

-+------------------+
-| stddev(Int64(1)) |
-+------------------+
-|                  |
-+------------------+
++-----------------------+
+| stddev_samp(Int64(1)) |
++-----------------------+
+|                       |
++-----------------------+

 SELECT var_samp(1);

-+---------------+
-| var(Int64(1)) |
-+---------------+
-|               |
-+---------------+
++--------------------+
+| var_samp(Int64(1)) |
++--------------------+
+|                    |
++--------------------+

 -- stddev_samp
 SELECT round(stddev_samp(val), 1) FROM stddev_test;

-+-----------------------------------------+
-| round(stddev(stddev_test.val),Int64(1)) |
-+-----------------------------------------+
-| 478.8                                   |
-+-----------------------------------------+
++----------------------------------------------+
+| round(stddev_samp(stddev_test.val),Int64(1)) |
++----------------------------------------------+
+| 478.8                                        |
++----------------------------------------------+

 SELECT round(stddev_samp(val), 1) FROM stddev_test WHERE val IS NOT NULL;

-+-----------------------------------------+
-| round(stddev(stddev_test.val),Int64(1)) |
-+-----------------------------------------+
-| 478.8                                   |
-+-----------------------------------------+
++----------------------------------------------+
+| round(stddev_samp(stddev_test.val),Int64(1)) |
++----------------------------------------------+
+| 478.8                                        |
++----------------------------------------------+

 SELECT grp, sum(val), round(stddev_samp(val), 1), min(val) FROM stddev_test GROUP BY grp ORDER BY grp;

-+-----+----------------------+-----------------------------------------+----------------------+
-| grp | sum(stddev_test.val) | round(stddev(stddev_test.val),Int64(1)) | min(stddev_test.val) |
-+-----+----------------------+-----------------------------------------+----------------------+
-| 1   | 85                   | 0.7                                     | 42                   |
-| 2   | 1042                 | 677.4                                   | 42                   |
-| 3   |                      |                                         |                      |
-+-----+----------------------+-----------------------------------------+----------------------+
++-----+----------------------+----------------------------------------------+----------------------+
+| grp | sum(stddev_test.val) | round(stddev_samp(stddev_test.val),Int64(1)) | min(stddev_test.val) |
++-----+----------------------+----------------------------------------------+----------------------+
+| 1   | 85                   | 0.7                                          | 42                   |
+| 2   | 1042                 | 677.4                                        | 42                   |
+| 3   |                      |                                              |                      |
++-----+----------------------+----------------------------------------------+----------------------+

 SELECT grp, sum(val), round(stddev_samp(val), 1), min(val) FROM stddev_test WHERE val IS NOT NULL GROUP BY grp ORDER BY grp;

-+-----+----------------------+-----------------------------------------+----------------------+
-| grp | sum(stddev_test.val) | round(stddev(stddev_test.val),Int64(1)) | min(stddev_test.val) |
-+-----+----------------------+-----------------------------------------+----------------------+
-| 1   | 85                   | 0.7                                     | 42                   |
-| 2   | 1042                 | 677.4                                   | 42                   |
-+-----+----------------------+-----------------------------------------+----------------------+
++-----+----------------------+----------------------------------------------+----------------------+
+| grp | sum(stddev_test.val) | round(stddev_samp(stddev_test.val),Int64(1)) | min(stddev_test.val) |
++-----+----------------------+----------------------------------------------+----------------------+
+| 1   | 85                   | 0.7                                          | 42                   |
+| 2   | 1042                 | 677.4                                        | 42                   |
++-----+----------------------+----------------------------------------------+----------------------+

 -- stddev_pop
 SELECT round(stddev_pop(val), 1) FROM stddev_test;
@@ -99,38 +99,38 @@ SELECT grp, sum(val), round(stddev_pop(val), 1), min(val) FROM stddev_test WHERE
 -- var_samp
 SELECT round(var_samp(val), 1) FROM stddev_test;

-+--------------------------------------+
-| round(var(stddev_test.val),Int64(1)) |
-+--------------------------------------+
-| 229281.6                             |
-+--------------------------------------+
++-------------------------------------------+
+| round(var_samp(stddev_test.val),Int64(1)) |
++-------------------------------------------+
+| 229281.6                                  |
++-------------------------------------------+

 SELECT round(var_samp(val), 1) FROM stddev_test WHERE val IS NOT NULL;

-+--------------------------------------+
-| round(var(stddev_test.val),Int64(1)) |
-+--------------------------------------+
-| 229281.6                             |
-+--------------------------------------+
++-------------------------------------------+
+| round(var_samp(stddev_test.val),Int64(1)) |
++-------------------------------------------+
+| 229281.6                                  |
++-------------------------------------------+

 SELECT grp, sum(val), round(var_samp(val), 1), min(val) FROM stddev_test GROUP BY grp ORDER BY grp;

-+-----+----------------------+--------------------------------------+----------------------+
-| grp | sum(stddev_test.val) | round(var(stddev_test.val),Int64(1)) | min(stddev_test.val) |
-+-----+----------------------+--------------------------------------+----------------------+
-| 1   | 85                   | 0.5                                  | 42                   |
-| 2   | 1042                 | 458882.0                             | 42                   |
-| 3   |                      |                                      |                      |
-+-----+----------------------+--------------------------------------+----------------------+
++-----+----------------------+-------------------------------------------+----------------------+
+| grp | sum(stddev_test.val) | round(var_samp(stddev_test.val),Int64(1)) | min(stddev_test.val) |
++-----+----------------------+-------------------------------------------+----------------------+
+| 1   | 85                   | 0.5                                       | 42                   |
+| 2   | 1042                 | 458882.0                                  | 42                   |
+| 3   |                      |                                           |                      |
++-----+----------------------+-------------------------------------------+----------------------+

 SELECT grp, sum(val), round(var_samp(val), 1), min(val) FROM stddev_test WHERE val IS NOT NULL GROUP BY grp ORDER BY grp;

-+-----+----------------------+--------------------------------------+----------------------+
-| grp | sum(stddev_test.val) | round(var(stddev_test.val),Int64(1)) | min(stddev_test.val) |
-+-----+----------------------+--------------------------------------+----------------------+
-| 1   | 85                   | 0.5                                  | 42                   |
-| 2   | 1042                 | 458882.0                             | 42                   |
-+-----+----------------------+--------------------------------------+----------------------+
++-----+----------------------+-------------------------------------------+----------------------+
+| grp | sum(stddev_test.val) | round(var_samp(stddev_test.val),Int64(1)) | min(stddev_test.val) |
++-----+----------------------+-------------------------------------------+----------------------+
+| 1   | 85                   | 0.5                                       | 42                   |
+| 2   | 1042                 | 458882.0                                  | 42                   |
++-----+----------------------+-------------------------------------------+----------------------+

 -- var_pop
 SELECT round(var_pop(val), 1) FROM stddev_test;
@@ -25,7 +25,7 @@ Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Execution err
 -- No matching function signature
 SELECT cos(0, 1, 2, 3);

-Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Failed to coerce arguments to satisfy a call to 'cos' function: coercion from [Int64, Int64, Int64, Int64] to the signature Uniform(1, [Float64, Float32]) failed No function matches the given name and argument types 'cos(Int64, Int64, Int64, Int64)'. You might need to add explicit type casts.
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Failed to coerce arguments to satisfy a call to 'cos' function: coercion from Int64, Int64, Int64, Int64 to the signature Uniform(1, [Float64, Float32]) failed No function matches the given name and argument types 'cos(Int64, Int64, Int64, Int64)'. You might need to add explicit type casts.
 	Candidate functions:
 	cos(Float64/Float32)
@@ -28,27 +28,27 @@ Error: 3001(EngineExecuteQuery), Divide by zero error

 SELECT POW (2, 5);

-+--------------------------+
-| power(Int64(2),Int64(5)) |
-+--------------------------+
-| 32                       |
-+--------------------------+
++------------------------+
+| pow(Int64(2),Int64(5)) |
++------------------------+
+| 32                     |
++------------------------+

 SELECT POW (1.01, 365);

-+---------------------------------+
-| power(Float64(1.01),Int64(365)) |
-+---------------------------------+
-| 37.78343433288728               |
-+---------------------------------+
++-------------------------------+
+| pow(Float64(1.01),Int64(365)) |
++-------------------------------+
+| 37.78343433288728             |
++-------------------------------+

 SELECT POW (0.99, 365);

-+---------------------------------+
-| power(Float64(0.99),Int64(365)) |
-+---------------------------------+
-| 0.025517964452291125            |
-+---------------------------------+
++-------------------------------+
+| pow(Float64(0.99),Int64(365)) |
++-------------------------------+
+| 0.025517964452291125          |
++-------------------------------+

 SELECT CLAMP(10, 0, 1);
@@ -49,7 +49,7 @@ Error: 1001(Unsupported), This feature is not implemented: Unsupported LIMIT exp

SELECT a FROM test LIMIT row_number() OVER ();

Error: 3001(EngineExecuteQuery), This feature is not implemented: Unsupported LIMIT expression: Some(Cast(Cast { expr: WindowFunction(WindowFunction { fun: WindowUDF(WindowUDF { inner: RowNumber { signature: Signature { type_signature: Nullary, volatility: Immutable } } }), params: WindowFunctionParams { args: [], partition_by: [], order_by: [], window_frame: WindowFrame { units: Rows, start_bound: Preceding(UInt64(NULL)), end_bound: Following(UInt64(NULL)), is_causal: false }, null_treatment: None, distinct: false } }), data_type: Int64 }))
Error: 3001(EngineExecuteQuery), This feature is not implemented: Unsupported LIMIT expression: Some(Cast(Cast { expr: WindowFunction(WindowFunction { fun: WindowUDF(WindowUDF { inner: RowNumber { signature: Signature { type_signature: Nullary, volatility: Immutable } } }), params: WindowFunctionParams { args: [], partition_by: [], order_by: [], window_frame: WindowFrame { units: Rows, start_bound: Preceding(UInt64(NULL)), end_bound: Following(UInt64(NULL)), is_causal: false }, filter: None, null_treatment: None, distinct: false } }), data_type: Int64 }))

CREATE TABLE test2 (a STRING, ts TIMESTAMP TIME INDEX);


@@ -23,12 +23,12 @@ Affected Rows: 10
-- Test by calculate
SELECT ts, length(host), max(val) RANGE '5s' FROM host ALIGN '20s' BY (length(host)) ORDER BY ts;

+---------------------+-----------------------------+------------------------+
| ts | character_length(host.host) | max(host.val) RANGE 5s |
+---------------------+-----------------------------+------------------------+
| 1970-01-01T00:00:00 | 5 | 3 |
| 1970-01-01T00:00:20 | 5 | 5 |
+---------------------+-----------------------------+------------------------+
+---------------------+-------------------+------------------------+
| ts | length(host.host) | max(host.val) RANGE 5s |
+---------------------+-------------------+------------------------+
| 1970-01-01T00:00:00 | 5 | 3 |
| 1970-01-01T00:00:20 | 5 | 5 |
+---------------------+-------------------+------------------------+

SELECT ts, max(val) RANGE '5s' FROM host ALIGN '20s' BY (2) ORDER BY ts;

@@ -52,12 +52,12 @@ SELECT ts, max(val) RANGE '5s' FROM host ALIGN '20s' BY () ORDER BY ts;

SELECT ts, length(host)::INT64 + 2, max(val) RANGE '5s' FROM host ALIGN '20s' BY (length(host)::INT64 + 2) ORDER BY ts;

+---------------------+------------------------------------------------------------------+------------------------+
| ts | arrow_cast(character_length(host.host),Utf8("Int64")) + Int64(2) | max(host.val) RANGE 5s |
+---------------------+------------------------------------------------------------------+------------------------+
| 1970-01-01T00:00:00 | 7 | 3 |
| 1970-01-01T00:00:20 | 7 | 5 |
+---------------------+------------------------------------------------------------------+------------------------+
+---------------------+--------------------------------------------------------+------------------------+
| ts | arrow_cast(length(host.host),Utf8("Int64")) + Int64(2) | max(host.val) RANGE 5s |
+---------------------+--------------------------------------------------------+------------------------+
| 1970-01-01T00:00:00 | 7 | 3 |
| 1970-01-01T00:00:20 | 7 | 5 |
+---------------------+--------------------------------------------------------+------------------------+

-- Test error
-- project non-aggregation key

@@ -188,22 +188,22 @@ SELECT ts, host, floor(cos(ceil(sin(min(val) RANGE '5s')))) FROM host ALIGN '5s'

SELECT ts, host, gcd(CAST(max(floor(val::DOUBLE)) RANGE '10s' FILL PREV as INT64) * 4, max(val * 4) RANGE '10s' FILL PREV) * length(host) + 1 FROM host ALIGN '5s' ORDER BY host, ts;

+---------------------+-------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ts | host | gcd(arrow_cast(max(floor(host.val)) RANGE 10s FILL PREV,Utf8("Int64")) * Int64(4),max(host.val * Int64(4)) RANGE 10s FILL PREV) * character_length(host.host) + Int64(1) |
+---------------------+-------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| 1969-12-31T23:59:55 | host1 | 1 |
| 1970-01-01T00:00:00 | host1 | 1 |
| 1970-01-01T00:00:05 | host1 | 21 |
| 1970-01-01T00:00:10 | host1 | 21 |
| 1970-01-01T00:00:15 | host1 | 41 |
| 1970-01-01T00:00:20 | host1 | 41 |
| 1969-12-31T23:59:55 | host2 | 61 |
| 1970-01-01T00:00:00 | host2 | 61 |
| 1970-01-01T00:00:05 | host2 | 81 |
| 1970-01-01T00:00:10 | host2 | 81 |
| 1970-01-01T00:00:15 | host2 | 101 |
| 1970-01-01T00:00:20 | host2 | 101 |
+---------------------+-------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+---------------------+-------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ts | host | gcd(arrow_cast(max(floor(host.val)) RANGE 10s FILL PREV,Utf8("Int64")) * Int64(4),max(host.val * Int64(4)) RANGE 10s FILL PREV) * length(host.host) + Int64(1) |
+---------------------+-------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+
| 1969-12-31T23:59:55 | host1 | 1 |
| 1970-01-01T00:00:00 | host1 | 1 |
| 1970-01-01T00:00:05 | host1 | 21 |
| 1970-01-01T00:00:10 | host1 | 21 |
| 1970-01-01T00:00:15 | host1 | 41 |
| 1970-01-01T00:00:20 | host1 | 41 |
| 1969-12-31T23:59:55 | host2 | 61 |
| 1970-01-01T00:00:00 | host2 | 61 |
| 1970-01-01T00:00:05 | host2 | 81 |
| 1970-01-01T00:00:10 | host2 | 81 |
| 1970-01-01T00:00:15 | host2 | 101 |
| 1970-01-01T00:00:20 | host2 | 101 |
+---------------------+-------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+

DROP TABLE host;


@@ -712,10 +712,10 @@ select * from pg_catalog.pg_type order by oid;
-- SQLNESS REPLACE (\d+\s*) OID
select * from pg_catalog.pg_database where datname = 'public';

+-----+---------+--------+----------+------------+----------+---------------+--------------+--------------+---------------+--------------+------------+---------------+--------+
| oid | datname | datdba | encoding | datcollate | datctype | datistemplate | datallowconn | datconnlimit | datlastsysoid | datfrozenxid | datminmxid | dattablespace | datacl |
+-----+---------+--------+----------+------------+----------+---------------+--------------+--------------+---------------+--------------+------------+---------------+--------+
+-----+---------+--------+----------+------------+----------+---------------+--------------+--------------+---------------+--------------+------------+---------------+--------+
+-----+---------+--------+----------+----------------+------------+----------+---------------+--------------+--------------+---------------+--------------+------------+---------------+--------------+-------------+--------+
| oid | datname | datdba | encoding | datlocprovider | datcollate | datctype | datistemplate | datallowconn | datconnlimit | datlastsysoid | datfrozenxid | datminmxid | dattablespace | daticulocale | daticurules | datacl |
+-----+---------+--------+----------+----------------+------------+----------+---------------+--------------+--------------+---------------+--------------+------------+---------------+--------------+-------------+--------+
+-----+---------+--------+----------+----------------+------------+----------+---------------+--------------+--------------+---------------+--------------+------------+---------------+--------------+-------------+--------+

-- \d
-- SQLNESS PROTOCOL POSTGRES

@@ -184,6 +184,7 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test;
| physical_plan after OutputRequirements_| MergeScanExec: REDACTED
|_|_|
| physical_plan after LimitAggregation_| SAME TEXT AS ABOVE_|
| physical_plan after LimitPushPastWindows_| SAME TEXT AS ABOVE_|
| physical_plan after LimitPushdown_| SAME TEXT AS ABOVE_|
| physical_plan after ProjectionPushdown_| SAME TEXT AS ABOVE_|
| physical_plan after EnsureCooperative_| CooperativeExec_|
@@ -321,6 +322,7 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test AS series;
| physical_plan after OutputRequirements_| MergeScanExec: REDACTED
|_|_|
| physical_plan after LimitAggregation_| SAME TEXT AS ABOVE_|
| physical_plan after LimitPushPastWindows_| SAME TEXT AS ABOVE_|
| physical_plan after LimitPushdown_| SAME TEXT AS ABOVE_|
| physical_plan after ProjectionPushdown_| SAME TEXT AS ABOVE_|
| physical_plan after EnsureCooperative_| CooperativeExec_|

@@ -775,8 +775,8 @@ LIMIT 5;
| | SubqueryAlias: l |
| | TableScan: labels |
| | ]] |
| physical_plan | SortPreservingMergeExec: [ts@0 ASC NULLS LAST, host@2 ASC NULLS LAST, avg_value@1 ASC NULLS LAST], fetch=5 |
| | SortExec: TopK(fetch=5), expr=[ts@0 ASC NULLS LAST, host@2 ASC NULLS LAST, avg_value@1 ASC NULLS LAST], preserve_REDACTED
| physical_plan | SortPreservingMergeExec: [ts@0 ASC NULLS LAST, avg_value@1 ASC NULLS LAST], fetch=5 |
| | SortExec: TopK(fetch=5), expr=[ts@0 ASC NULLS LAST, avg_value@1 ASC NULLS LAST], preserve_REDACTED
| | ProjectionExec: expr=[ts@0 as ts, cpu@1 as avg_value, host@2 as host] |
| | CoalesceBatchesExec: target_batch_size=8192 |
| | HashJoinExec: mode=Partitioned, join_type=Inner, on=[(date_trunc(Utf8("second"),t.ts)@2, date_trunc(Utf8("second"),l.ts)@2)], projection=[ts@0, cpu@1, host@4] |
@@ -861,8 +861,8 @@ LIMIT 5;
| | SubqueryAlias: l |
| | TableScan: labels |
| | ]] |
| physical_plan | SortPreservingMergeExec: [ts@0 ASC NULLS LAST, host@2 ASC NULLS LAST, avg_value@1 ASC NULLS LAST], fetch=5 |
| | SortExec: TopK(fetch=5), expr=[ts@0 ASC NULLS LAST, host@2 ASC NULLS LAST, avg_value@1 ASC NULLS LAST], preserve_REDACTED
| physical_plan | SortPreservingMergeExec: [ts@0 ASC NULLS LAST, avg_value@1 ASC NULLS LAST], fetch=5 |
| | SortExec: TopK(fetch=5), expr=[ts@0 ASC NULLS LAST, avg_value@1 ASC NULLS LAST], preserve_REDACTED
| | ProjectionExec: expr=[ts@1 as ts, cpu@0 as avg_value, host@2 as host] |
| | CoalesceBatchesExec: target_batch_size=8192 |
| | HashJoinExec: mode=Partitioned, join_type=Inner, on=[(date_trunc(Utf8("second"),t.ts)@2, date_trunc(Utf8("second"),l.ts)@2)], projection=[cpu@0, ts@1, host@4] |

@@ -23,14 +23,14 @@ Affected Rows: 1

SELECT LENGTH(a) FROM test ORDER BY 1;

+--------------------------+
| character_length(test.a) |
+--------------------------+
| 10 |
| 100 |
| 1000 |
| 10000 |
+--------------------------+
+----------------+
| length(test.a) |
+----------------+
| 10 |
| 100 |
| 1000 |
| 10000 |
+----------------+

DROP TABLE test;


@@ -33,11 +33,11 @@ Affected Rows: 1
-- verify that the append worked
SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 1 | 1 | 10000 | 10000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 1 | 1 | 10000 | 10000 |
+----------+-------------------+-------------------------+-------------------------+

-- we create a total of 16K entries in the big table
-- the total size of this table is 16K*10K = 160MB
@@ -48,11 +48,11 @@ Affected Rows: 1

SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 2 | 2 | 10000 | 20000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 2 | 2 | 10000 | 20000 |
+----------+-------------------+-------------------------+-------------------------+

INSERT INTO bigtable SELECT a, to_unixtime(ts) * 23 FROM bigtable;

@@ -60,11 +60,11 @@ Affected Rows: 2

SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 4 | 4 | 10000 | 40000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 4 | 4 | 10000 | 40000 |
+----------+-------------------+-------------------------+-------------------------+

INSERT INTO bigtable SELECT a, to_unixtime(ts) * 31 FROM bigtable;

@@ -72,11 +72,11 @@ Affected Rows: 4

SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 8 | 8 | 10000 | 80000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 8 | 8 | 10000 | 80000 |
+----------+-------------------+-------------------------+-------------------------+

INSERT INTO bigtable SELECT a, to_unixtime(ts) * 37 FROM bigtable;

@@ -84,11 +84,11 @@ Affected Rows: 8

SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 16 | 16 | 10000 | 160000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 16 | 16 | 10000 | 160000 |
+----------+-------------------+-------------------------+-------------------------+

INSERT INTO bigtable SELECT a, to_unixtime(ts) * 41 FROM bigtable;

@@ -96,11 +96,11 @@ Affected Rows: 16

SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 32 | 32 | 10000 | 320000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 32 | 32 | 10000 | 320000 |
+----------+-------------------+-------------------------+-------------------------+

INSERT INTO bigtable SELECT a, to_unixtime(ts) * 47 FROM bigtable;

@@ -108,11 +108,11 @@ Affected Rows: 32

SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 64 | 64 | 10000 | 640000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 64 | 64 | 10000 | 640000 |
+----------+-------------------+-------------------------+-------------------------+

INSERT INTO bigtable SELECT a, to_unixtime(ts) * 51 FROM bigtable;

@@ -120,11 +120,11 @@ Affected Rows: 64

SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 128 | 128 | 10000 | 1280000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 128 | 128 | 10000 | 1280000 |
+----------+-------------------+-------------------------+-------------------------+

INSERT INTO bigtable SELECT a, to_unixtime(ts) * 53 FROM bigtable;

@@ -132,11 +132,11 @@ Affected Rows: 128

SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 256 | 256 | 10000 | 2560000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 256 | 256 | 10000 | 2560000 |
+----------+-------------------+-------------------------+-------------------------+

INSERT INTO bigtable SELECT a, to_unixtime(ts) * 57 FROM bigtable;

@@ -144,11 +144,11 @@ Affected Rows: 256

SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 512 | 512 | 10000 | 5120000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 512 | 512 | 10000 | 5120000 |
+----------+-------------------+-------------------------+-------------------------+

INSERT INTO bigtable SELECT a, to_unixtime(ts) * 61 FROM bigtable;

@@ -156,11 +156,11 @@ Affected Rows: 512

SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 1024 | 1024 | 10000 | 10240000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 1024 | 1024 | 10000 | 10240000 |
+----------+-------------------+-------------------------+-------------------------+

INSERT INTO bigtable SELECT a, to_unixtime(ts) * 63 FROM bigtable;

@@ -168,20 +168,20 @@ Affected Rows: 1024

SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 2048 | 2048 | 10000 | 20480000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 2048 | 2048 | 10000 | 20480000 |
+----------+-------------------+-------------------------+-------------------------+

-- SQLNESS ARG restart=true
SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 2048 | 2048 | 10000 | 20480000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 2048 | 2048 | 10000 | 20480000 |
+----------+-------------------+-------------------------+-------------------------+

INSERT INTO bigtable SELECT a, to_unixtime(ts) * 67 FROM bigtable;

@@ -189,11 +189,11 @@ Affected Rows: 2048

SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 4096 | 4096 | 10000 | 40960000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 4096 | 4096 | 10000 | 40960000 |
+----------+-------------------+-------------------------+-------------------------+

INSERT INTO bigtable SELECT a, to_unixtime(ts) * 71 FROM bigtable;

@@ -201,11 +201,11 @@ Affected Rows: 4096

SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;

+----------+-------------------+-----------------------------------+-----------------------------------+
| count(*) | count(bigtable.a) | max(character_length(bigtable.a)) | sum(character_length(bigtable.a)) |
+----------+-------------------+-----------------------------------+-----------------------------------+
| 8192 | 8192 | 10000 | 81920000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
+----------+-------------------+-------------------------+-------------------------+
| count(*) | count(bigtable.a) | max(length(bigtable.a)) | sum(length(bigtable.a)) |
+----------+-------------------+-------------------------+-------------------------+
| 8192 | 8192 | 10000 | 81920000 |
+----------+-------------------+-------------------------+-------------------------+

DROP TABLE test;


@@ -58,12 +58,12 @@ SELECT substr('🦤🦆f', 1, 2);
-- length on emojis
SELECT length(s) FROM emojis ORDER BY id;

+----------------------------+
| character_length(emojis.s) |
+----------------------------+
| 1 |
| 3 |
+----------------------------+
+------------------+
| length(emojis.s) |
+------------------+
| 1 |
| 3 |
+------------------+

DROP TABLE emojis;


@@ -9,7 +9,7 @@ Error: 2000(InvalidSyntax), Invalid SQL syntax: sql parser error: Expected: AS,

CREATE VIEW test_view as DELETE FROM public.numbers;

Error: 2000(InvalidSyntax), Invalid SQL syntax: sql parser error: Expected: SELECT, VALUES, or a subquery in the query body, found: DELETE at Line: 1, Column: 26
Error: 1001(Unsupported), Failed to plan SQL: This feature is not implemented: Query DELETE FROM public.numbers not implemented yet

--- Table already exists ---
CREATE VIEW test_table as SELECT * FROM public.numbers;
