diff --git a/src/common/function/src/scalars/math/rate.rs b/src/common/function/src/scalars/math/rate.rs index e296fb9496..cbe4c92550 100644 --- a/src/common/function/src/scalars/math/rate.rs +++ b/src/common/function/src/scalars/math/rate.rs @@ -37,7 +37,7 @@ impl fmt::Display for RateFunction { impl Function for RateFunction { fn name(&self) -> &str { - "prom_rate" + "rate" } fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result { @@ -82,7 +82,7 @@ mod tests { #[test] fn test_rate_function() { let rate = RateFunction; - assert_eq!("prom_rate", rate.name()); + assert_eq!("rate", rate.name()); assert_eq!( ConcreteDataType::float64_datatype(), rate.return_type(&[]).unwrap() diff --git a/src/datatypes/src/vectors/dictionary.rs b/src/datatypes/src/vectors/dictionary.rs index e6831d2ed7..07994d13bd 100644 --- a/src/datatypes/src/vectors/dictionary.rs +++ b/src/datatypes/src/vectors/dictionary.rs @@ -16,8 +16,8 @@ use std::any::Any; use std::sync::Arc; use arrow::array::Array; -use arrow::datatypes::Int32Type; -use arrow_array::{ArrayRef, DictionaryArray, Int32Array}; +use arrow::datatypes::Int64Type; +use arrow_array::{ArrayRef, DictionaryArray, Int64Array}; use serde_json::Value as JsonValue; use snafu::ResultExt; @@ -32,7 +32,7 @@ use crate::vectors::{self, Helper, Validity, Vector, VectorRef}; /// Vector of dictionaries, basically backed by Arrow's `DictionaryArray`. #[derive(Debug, PartialEq)] pub struct DictionaryVector { - array: DictionaryArray, + array: DictionaryArray, /// The datatype of the items in the dictionary. item_type: ConcreteDataType, /// The vector of items in the dictionary. @@ -41,7 +41,7 @@ pub struct DictionaryVector { impl DictionaryVector { /// Create a new instance of `DictionaryVector` from a dictionary array and item type - pub fn new(array: DictionaryArray, item_type: ConcreteDataType) -> Result { + pub fn new(array: DictionaryArray, item_type: ConcreteDataType) -> Result { let item_vector = Helper::try_into_vector(array.values())?; Ok(Self { @@ -52,12 +52,12 @@ impl DictionaryVector { } /// Returns the underlying Arrow dictionary array - pub fn array(&self) -> &DictionaryArray { + pub fn array(&self) -> &DictionaryArray { &self.array } /// Returns the keys array of this dictionary - pub fn keys(&self) -> &arrow_array::PrimitiveArray { + pub fn keys(&self) -> &arrow_array::PrimitiveArray { self.array.keys() } @@ -74,7 +74,7 @@ impl DictionaryVector { impl Vector for DictionaryVector { fn data_type(&self) -> ConcreteDataType { ConcreteDataType::Dictionary(DictionaryType::new( - ConcreteDataType::int32_datatype(), + ConcreteDataType::int64_datatype(), self.item_type.clone(), )) } @@ -163,10 +163,10 @@ impl Serializable for DictionaryVector { } } -impl TryFrom> for DictionaryVector { +impl TryFrom> for DictionaryVector { type Error = crate::error::Error; - fn try_from(array: DictionaryArray) -> Result { + fn try_from(array: DictionaryArray) -> Result { let item_type = ConcreteDataType::from_arrow_type(array.values().data_type()); let item_vector = Helper::try_into_vector(array.values())?; @@ -243,7 +243,7 @@ impl VectorOp for DictionaryVector { previous_offset = offset; } - let new_keys = Int32Array::from(replicated_keys); + let new_keys = Int64Array::from(replicated_keys); let new_array = DictionaryArray::try_new(new_keys, self.values().clone()) .expect("Failed to create replicated dictionary array"); @@ -261,7 +261,7 @@ impl VectorOp for DictionaryVector { let filtered_key_array = filtered_key_vector.to_arrow_array(); let filtered_key_array = 
filtered_key_array .as_any() - .downcast_ref::() + .downcast_ref::() .unwrap(); let new_array = DictionaryArray::try_new(filtered_key_array.clone(), self.values().clone()) @@ -291,7 +291,7 @@ impl VectorOp for DictionaryVector { let key_vector = Helper::try_into_vector(&key_array)?; let new_key_vector = key_vector.take(indices)?; let new_key_array = new_key_vector.to_arrow_array(); - let new_key_array = new_key_array.as_any().downcast_ref::().unwrap(); + let new_key_array = new_key_array.as_any().downcast_ref::().unwrap(); let new_array = DictionaryArray::try_new(new_key_array.clone(), self.values().clone()) .expect("Failed to create filtered dictionary array"); @@ -318,7 +318,7 @@ mod tests { // Keys: [0, 1, 2, null, 1, 3] // Resulting in: ["a", "b", "c", null, "b", "d"] let values = StringArray::from(vec!["a", "b", "c", "d"]); - let keys = Int32Array::from(vec![Some(0), Some(1), Some(2), None, Some(1), Some(3)]); + let keys = Int64Array::from(vec![Some(0), Some(1), Some(2), None, Some(1), Some(3)]); let dict_array = DictionaryArray::new(keys, Arc::new(values)); DictionaryVector::try_from(dict_array).unwrap() } @@ -404,7 +404,7 @@ mod tests { assert_eq!( casted.data_type(), ConcreteDataType::Dictionary(DictionaryType::new( - ConcreteDataType::int32_datatype(), + ConcreteDataType::int64_datatype(), ConcreteDataType::string_datatype(), )) ); diff --git a/src/datatypes/src/vectors/helper.rs b/src/datatypes/src/vectors/helper.rs index cb8c8972a7..4e23d56809 100644 --- a/src/datatypes/src/vectors/helper.rs +++ b/src/datatypes/src/vectors/helper.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use arrow::array::{Array, ArrayRef, StringArray}; use arrow::compute; use arrow::compute::kernels::comparison; -use arrow::datatypes::{DataType as ArrowDataType, Int32Type, TimeUnit}; +use arrow::datatypes::{DataType as ArrowDataType, Int64Type, TimeUnit}; use arrow_array::DictionaryArray; use arrow_schema::IntervalUnit; use datafusion_common::ScalarValue; @@ -348,11 +348,11 @@ impl Helper { ArrowDataType::Decimal128(_, _) => { Arc::new(Decimal128Vector::try_from_arrow_array(array)?) 
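For context on the `Int32Type` → `Int64Type` key switch above, here is a minimal, self-contained sketch (not part of the patch) of building the `Int64`-keyed `DictionaryArray` that `DictionaryVector` now wraps, mirroring the test fixture in this hunk and assuming only the `arrow`/`arrow_array` crates the diff already imports:

```rust
use std::sync::Arc;

use arrow::datatypes::Int64Type;
use arrow_array::{Array, DictionaryArray, Int64Array, StringArray};

fn main() {
    // Values: ["a", "b", "c", "d"]; keys: [0, 1, 2, null, 1, 3] => ["a", "b", "c", null, "b", "d"]
    let values = StringArray::from(vec!["a", "b", "c", "d"]);
    let keys = Int64Array::from(vec![Some(0), Some(1), Some(2), None, Some(1), Some(3)]);
    let dict: DictionaryArray<Int64Type> = DictionaryArray::new(keys, Arc::new(values));

    assert_eq!(dict.len(), 6); // logical length comes from the keys
    assert_eq!(dict.values().len(), 4); // each distinct item is stored once
    assert!(dict.is_null(3)); // a null key yields a null entry
}
```

Only the key width changes in this migration; the values array and the null semantics (nulls live in the keys) are untouched.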
} - ArrowDataType::Dictionary(key, value) if matches!(&**key, ArrowDataType::Int32) => { + ArrowDataType::Dictionary(key, value) if matches!(&**key, ArrowDataType::Int64) => { let array = array .as_ref() .as_any() - .downcast_ref::>() + .downcast_ref::>() .unwrap(); // Safety: the type is guarded by match arm condition Arc::new(DictionaryVector::new( array.clone(), diff --git a/src/mito2/src/read/seq_scan.rs b/src/mito2/src/read/seq_scan.rs index ee56fd9d40..76c30a45ff 100644 --- a/src/mito2/src/read/seq_scan.rs +++ b/src/mito2/src/read/seq_scan.rs @@ -206,6 +206,14 @@ impl SeqScan { .build(), )); } + if self.properties.partitions[partition].is_empty() { + return Ok(Box::pin(RecordBatchStreamWrapper::new( + self.stream_ctx.input.mapper.output_schema(), + common_recordbatch::EmptyRecordBatchStream::new( + self.stream_ctx.input.mapper.output_schema(), + ), + ))); + } if self.stream_ctx.input.distribution == Some(TimeSeriesDistribution::PerSeries) { return self.scan_partition_by_series(metrics_set, partition); diff --git a/src/promql/src/extension_plan/instant_manipulate.rs b/src/promql/src/extension_plan/instant_manipulate.rs index 1071e94f9a..a3339a4e95 100644 --- a/src/promql/src/extension_plan/instant_manipulate.rs +++ b/src/promql/src/extension_plan/instant_manipulate.rs @@ -91,9 +91,9 @@ impl UserDefinedLogicalNodeCore for InstantManipulate { _exprs: Vec, inputs: Vec, ) -> DataFusionResult { - if inputs.is_empty() { + if inputs.len() != 1 { return Err(DataFusionError::Internal( - "InstantManipulate should have at least one input".to_string(), + "InstantManipulate should have exactly one input".to_string(), )); } @@ -354,6 +354,9 @@ impl Stream for InstantManipulateStream { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let poll = match ready!(self.input.poll_next_unpin(cx)) { Some(Ok(batch)) => { + if batch.num_rows() == 0 { + return Poll::Pending; + } let timer = std::time::Instant::now(); self.num_series.add(1); let result = Ok(batch).and_then(|batch| self.manipulate(batch)); diff --git a/src/promql/src/extension_plan/range_manipulate.rs b/src/promql/src/extension_plan/range_manipulate.rs index 1e1cac3555..7af75b0458 100644 --- a/src/promql/src/extension_plan/range_manipulate.rs +++ b/src/promql/src/extension_plan/range_manipulate.rs @@ -42,7 +42,7 @@ use greptime_proto::substrait_extension as pb; use prost::Message; use snafu::ResultExt; -use crate::error::{DataFusionPlanningSnafu, DeserializeSnafu, Result}; +use crate::error::{DeserializeSnafu, Result}; use crate::extension_plan::{Millisecond, METRIC_NUM_SERIES}; use crate::metrics::PROMQL_SERIES_COUNT; use crate::range_array::RangeArray; @@ -194,20 +194,26 @@ impl RangeManipulate { pub fn deserialize(bytes: &[u8]) -> Result { let pb_range_manipulate = pb::RangeManipulate::decode(bytes).context(DeserializeSnafu)?; + let empty_schema = Arc::new(DFSchema::empty()); let placeholder_plan = LogicalPlan::EmptyRelation(EmptyRelation { produce_one_row: false, - schema: Arc::new(DFSchema::empty()), + schema: empty_schema.clone(), }); - Self::new( - pb_range_manipulate.start, - pb_range_manipulate.end, - pb_range_manipulate.interval, - pb_range_manipulate.range, - pb_range_manipulate.time_index, - pb_range_manipulate.tag_columns, - placeholder_plan, - ) - .context(DataFusionPlanningSnafu) + + // Unlike `Self::new()`, this method doesn't check the input schema as it will fail
+ // But this is OK since DataFusion guarantees to call `with_exprs_and_inputs` for the + // deserialized plan. + Ok(Self { + start: pb_range_manipulate.start, + end: pb_range_manipulate.end, + interval: pb_range_manipulate.interval, + range: pb_range_manipulate.range, + time_index: pb_range_manipulate.time_index, + field_columns: pb_range_manipulate.tag_columns, + input: placeholder_plan, + output_schema: empty_schema, + }) } } @@ -270,14 +276,19 @@ impl UserDefinedLogicalNodeCore for RangeManipulate { fn with_exprs_and_inputs( &self, _exprs: Vec, - inputs: Vec, + mut inputs: Vec, ) -> DataFusionResult { - if inputs.is_empty() { + if inputs.len() != 1 { return Err(DataFusionError::Internal( - "RangeManipulate should have at least one input".to_string(), + "RangeManipulate should have exactly one input".to_string(), )); } + let input: LogicalPlan = inputs.pop().unwrap(); + let input_schema = input.schema(); + let output_schema = + Self::calculate_output_schema(input_schema, &self.time_index, &self.field_columns)?; + Ok(Self { start: self.start, end: self.end, @@ -285,8 +296,8 @@ impl UserDefinedLogicalNodeCore for RangeManipulate { range: self.range, time_index: self.time_index.clone(), field_columns: self.field_columns.clone(), - input: inputs.into_iter().next().unwrap(), - output_schema: self.output_schema.clone(), + input, + output_schema, }) } } diff --git a/src/promql/src/extension_plan/series_divide.rs b/src/promql/src/extension_plan/series_divide.rs index 06ef942762..36e1c10f42 100644 --- a/src/promql/src/extension_plan/series_divide.rs +++ b/src/promql/src/extension_plan/series_divide.rs @@ -106,6 +106,10 @@ impl SeriesDivide { }) } + pub fn tags(&self) -> &[String] { + &self.tag_columns + } + pub fn serialize(&self) -> Vec { pb::SeriesDivide { tag_columns: self.tag_columns.clone(), @@ -315,7 +319,9 @@ impl Stream for SeriesDivideStream { let next_batch = ready!(self.as_mut().fetch_next_batch(cx)).transpose()?; let timer = std::time::Instant::now(); if let Some(next_batch) = next_batch { - self.buffer.push(next_batch); + if next_batch.num_rows() != 0 { + self.buffer.push(next_batch); + } continue; } else { // input stream is ended diff --git a/src/promql/src/functions.rs b/src/promql/src/functions.rs index 81a9c9cedb..fee6387d20 100644 --- a/src/promql/src/functions.rs +++ b/src/promql/src/functions.rs @@ -40,7 +40,7 @@ pub use holt_winters::HoltWinters; pub use idelta::IDelta; pub use predict_linear::PredictLinear; pub use quantile::QuantileOverTime; -pub use quantile_aggr::quantile_udaf; +pub use quantile_aggr::{quantile_udaf, QUANTILE_NAME}; pub use resets::Resets; pub use round::Round; diff --git a/src/promql/src/functions/extrapolate_rate.rs b/src/promql/src/functions/extrapolate_rate.rs index bdcd6a2b1b..aadaab904c 100644 --- a/src/promql/src/functions/extrapolate_rate.rs +++ b/src/promql/src/functions/extrapolate_rate.rs @@ -228,7 +228,7 @@ impl ExtrapolatedRate { - pub fn name() -> &'static str { + pub const fn name() -> &'static str { "prom_delta" } @@ -239,7 +239,7 @@ impl ExtrapolatedRate { // rate impl ExtrapolatedRate { - pub fn name() -> &'static str { + pub const fn name() -> &'static str { "prom_rate" } @@ -250,7 +250,7 @@ impl ExtrapolatedRate { // increase impl ExtrapolatedRate { - pub fn name() -> &'static str { + pub const fn name() -> &'static str { "prom_increase" } diff --git a/src/promql/src/functions/quantile_aggr.rs b/src/promql/src/functions/quantile_aggr.rs index 08d18c8c4f..5652f57342 100644 --- a/src/promql/src/functions/quantile_aggr.rs +++ 
b/src/promql/src/functions/quantile_aggr.rs @@ -27,7 +27,7 @@ use datatypes::arrow::datatypes::{DataType, Field, Float64Type}; use crate::functions::quantile::quantile_impl; -const QUANTILE_NAME: &str = "quantile"; +pub const QUANTILE_NAME: &str = "quantile"; const VALUES_FIELD_NAME: &str = "values"; const DEFAULT_LIST_FIELD_NAME: &str = "item"; diff --git a/src/query/src/dist_plan/commutativity.rs b/src/query/src/dist_plan/commutativity.rs index 5b3cb0f2db..59871e991c 100644 --- a/src/query/src/dist_plan/commutativity.rs +++ b/src/query/src/dist_plan/commutativity.rs @@ -55,12 +55,16 @@ impl Categorizer { LogicalPlan::Filter(filter) => Self::check_expr(&filter.predicate), LogicalPlan::Window(_) => Commutativity::Unimplemented, LogicalPlan::Aggregate(aggr) => { - if Self::check_partition(&aggr.group_expr, &partition_cols) { - return Commutativity::Commutative; + if !Self::check_partition(&aggr.group_expr, &partition_cols) { + return Commutativity::NonCommutative; } - - // check all children exprs and uses the strictest level - Commutativity::Unimplemented + for expr in &aggr.aggr_expr { + let commutativity = Self::check_expr(expr); + if !matches!(commutativity, Commutativity::Commutative) { + return commutativity; + } + } + Commutativity::Commutative } LogicalPlan::Sort(_) => { if partition_cols.is_empty() { @@ -94,7 +98,7 @@ impl Categorizer { } } LogicalPlan::Extension(extension) => { - Self::check_extension_plan(extension.node.as_ref() as _) + Self::check_extension_plan(extension.node.as_ref() as _, &partition_cols) } LogicalPlan::Distinct(_) => { if partition_cols.is_empty() { @@ -116,13 +120,30 @@ impl Categorizer { } } - pub fn check_extension_plan(plan: &dyn UserDefinedLogicalNode) -> Commutativity { + pub fn check_extension_plan( + plan: &dyn UserDefinedLogicalNode, + partition_cols: &[String], + ) -> Commutativity { match plan.name() { - name if name == EmptyMetric::name() + name if name == SeriesDivide::name() => { + let series_divide = plan.as_any().downcast_ref::().unwrap(); + let tags = series_divide.tags().iter().collect::>(); + for partition_col in partition_cols { + if !tags.contains(partition_col) { + return Commutativity::NonCommutative; + } + } + Commutativity::Commutative + } + name if name == SeriesNormalize::name() || name == InstantManipulate::name() - || name == SeriesNormalize::name() - || name == RangeManipulate::name() - || name == SeriesDivide::name() + || name == RangeManipulate::name() => + { + // They should always follow SeriesDivide. + // Either all commutative or all non-commutative (which will be blocked by SeriesDivide). 
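The new `SeriesDivide` arm above reduces to a set-containment test: the node stays commutative only when every partition column is one of the divide's tag columns, presumably because partitioning on a non-tag column could split one series across regions. A standalone sketch of that rule with illustrative names (not the project's API):

```rust
use std::collections::HashSet;

/// Commutative iff every partition column is covered by the divide's tag columns.
fn series_divide_is_commutative(tags: &[String], partition_cols: &[String]) -> bool {
    let tags: HashSet<&String> = tags.iter().collect();
    partition_cols.iter().all(|col| tags.contains(col))
}

fn main() {
    let tags = vec!["k".to_string(), "l".to_string()];
    assert!(series_divide_is_commutative(&tags, &["k".to_string()])); // partition on a tag: commutative
    assert!(!series_divide_is_commutative(&tags, &["host".to_string()])); // anything else: non-commutative
}
```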
+ Commutativity::Commutative + } + name if name == EmptyMetric::name() || name == MergeScanLogicalPlan::name() || name == MergeSortLogicalPlan::name() => { @@ -148,8 +169,9 @@ impl Categorizer { | Expr::Negative(_) | Expr::Between(_) | Expr::Exists(_) - | Expr::InList(_) - | Expr::ScalarFunction(_) => Commutativity::Commutative, + | Expr::InList(_) => Commutativity::Commutative, + Expr::ScalarFunction(_udf) => Commutativity::Commutative, + Expr::AggregateFunction(_udaf) => Commutativity::Commutative, Expr::Like(_) | Expr::SimilarTo(_) | Expr::Not(_) | Expr::Case(_) | Expr::Cast(_) | Expr::TryCast(_) - | Expr::AggregateFunction(_) | Expr::WindowFunction(_) | Expr::InSubquery(_) | Expr::ScalarSubquery(_) diff --git a/src/query/src/optimizer/parallelize_scan.rs b/src/query/src/optimizer/parallelize_scan.rs index b6d22f7de5..b60259f93f 100644 --- a/src/query/src/optimizer/parallelize_scan.rs +++ b/src/query/src/optimizer/parallelize_scan.rs @@ -23,7 +23,6 @@ use datafusion::physical_plan::ExecutionPlan; use datafusion_common::tree_node::{Transformed, TreeNode}; use datafusion_common::{DataFusionError, Result}; use store_api::region_engine::PartitionRange; -use store_api::storage::TimeSeriesDistribution; use table::table::scan::RegionScanExec; #[derive(Debug)] @@ -66,14 +65,6 @@ impl ParallelizeScan { return Ok(Transformed::no(plan)); } - // don't parallelize if we want per series distribution - if matches!( - region_scan_exec.distribution(), - Some(TimeSeriesDistribution::PerSeries) - ) { - return Ok(Transformed::no(plan)); - } - let ranges = region_scan_exec.get_partition_ranges(); let total_range_num = ranges.len(); let expected_partition_num = config.execution.target_partitions; @@ -121,36 +112,25 @@ impl ParallelizeScan { /// Distribute [`PartitionRange`]s to each partition. /// /// Currently we assign ranges to partitions according to their rows so each partition - /// has similar number of rows. - /// This method may return partitions with smaller number than `expected_partition_num` - /// if the number of ranges is smaller than `expected_partition_num`. But this will - /// return at least one partition. + /// has similar number of rows. This method always returns `expected_partition_num` partitions. fn assign_partition_range( mut ranges: Vec, expected_partition_num: usize, ) -> Vec> { if ranges.is_empty() { // Returns a single partition with no range. - return vec![vec![]]; + return vec![vec![]; expected_partition_num]; } if ranges.len() == 1 { - return vec![ranges]; + let mut vec = vec![vec![]; expected_partition_num]; + vec[0] = ranges; + return vec; } // Sort ranges by number of rows in descending order. ranges.sort_by(|a, b| b.num_rows.cmp(&a.num_rows)); - // Get the max row number of the ranges. Note that the number of rows may be 0 if statistics are not available. - let max_rows = ranges[0].num_rows; - let total_rows = ranges.iter().map(|range| range.num_rows).sum::(); - // Computes the partition num by the max row number. This eliminates the unbalance of the partitions. 
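The replacement for the removed balancing heuristic is visible in the rest of this hunk: always create `expected_partition_num` buckets and greedily hand each range, largest first, to the bucket that currently holds the fewest rows. A self-contained sketch of that strategy, using plain row counts instead of `PartitionRange` (names are illustrative):

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

/// Greedy assignment: sort ranges by row count (descending) and always push the next
/// range into the least-loaded partition. Always yields `expected_partition_num` buckets.
fn assign(mut ranges: Vec<usize>, expected_partition_num: usize) -> Vec<Vec<usize>> {
    let mut partitions = vec![vec![]; expected_partition_num];
    if ranges.is_empty() {
        return partitions;
    }
    ranges.sort_unstable_by(|a, b| b.cmp(a));
    // Min-heap over (rows_assigned, partition_idx).
    let mut heap: BinaryHeap<Reverse<(usize, usize)>> =
        (0..expected_partition_num).map(|idx| Reverse((0, idx))).collect();
    for rows in ranges {
        let Reverse((assigned, idx)) = heap.pop().expect("heap is never empty");
        partitions[idx].push(rows);
        heap.push(Reverse((assigned + rows, idx)));
    }
    partitions
}

fn main() {
    let parts = assign(vec![250, 200, 150, 100], 5);
    assert_eq!(parts.len(), 5); // always `expected_partition_num` partitions
    assert_eq!(parts.iter().filter(|p| p.is_empty()).count(), 1); // one bucket stays empty
}
```

This matches the updated test expectations below, where 4 ranges spread over 5 partitions leave exactly one partition empty.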
- let balanced_partition_num = if max_rows > 0 { - total_rows.div_ceil(max_rows) - } else { - ranges.len() - }; - let actual_partition_num = expected_partition_num.min(balanced_partition_num).max(1); - let mut partition_ranges = vec![vec![]; actual_partition_num]; + let mut partition_ranges = vec![vec![]; expected_partition_num]; #[derive(Eq, PartialEq)] struct HeapNode { @@ -172,14 +152,13 @@ impl ParallelizeScan { } let mut part_heap = - BinaryHeap::from_iter((0..actual_partition_num).map(|partition_idx| HeapNode { + BinaryHeap::from_iter((0..expected_partition_num).map(|partition_idx| HeapNode { num_rows: 0, partition_idx, })); // Assigns the range to the partition with the smallest number of rows. for range in ranges { - // Safety: actual_partition_num always > 0. let mut node = part_heap.pop().unwrap(); let partition_idx = node.partition_idx; node.num_rows += range.num_rows; @@ -263,7 +242,7 @@ mod test { ]; assert_eq!(result, expected); - // assign 4 ranges to 5 partitions. Only 4 partitions are returned. + // assign 4 ranges to 5 partitions. let expected_partition_num = 5; let result = ParallelizeScan::assign_partition_range(ranges, expected_partition_num); let expected = vec![ @@ -273,32 +252,31 @@ mod test { num_rows: 250, identifier: 4, }], + vec![PartitionRange { + start: Timestamp::new(0, TimeUnit::Second), + end: Timestamp::new(10, TimeUnit::Second), + num_rows: 100, + identifier: 1, + }], vec![PartitionRange { start: Timestamp::new(10, TimeUnit::Second), end: Timestamp::new(20, TimeUnit::Second), num_rows: 200, identifier: 2, }], - vec![ - PartitionRange { - start: Timestamp::new(20, TimeUnit::Second), - end: Timestamp::new(30, TimeUnit::Second), - num_rows: 150, - identifier: 3, - }, - PartitionRange { - start: Timestamp::new(0, TimeUnit::Second), - end: Timestamp::new(10, TimeUnit::Second), - num_rows: 100, - identifier: 1, - }, - ], + vec![], + vec![PartitionRange { + start: Timestamp::new(20, TimeUnit::Second), + end: Timestamp::new(30, TimeUnit::Second), + num_rows: 150, + identifier: 3, + }], ]; assert_eq!(result, expected); - // assign 0 ranges to 5 partitions. Only 1 partition is returned. + // assign 0 ranges to 5 partitions. Should return 5 empty ranges. 
let result = ParallelizeScan::assign_partition_range(vec![], 5); - assert_eq!(result.len(), 1); + assert_eq!(result.len(), 5); } #[test] diff --git a/src/query/src/promql/planner.rs b/src/query/src/promql/planner.rs index 2869a2201b..7c094e4d69 100644 --- a/src/query/src/promql/planner.rs +++ b/src/query/src/promql/planner.rs @@ -1740,7 +1740,7 @@ impl PromPlanner { // regexp_replace(src_label, regex, replacement) let args = vec![ if src_label.is_empty() { - DfExpr::Literal(ScalarValue::Null) + DfExpr::Literal(ScalarValue::Utf8(Some(String::new()))) } else { DfExpr::Column(Column::from_name(src_label)) }, diff --git a/src/query/src/query_engine/default_serializer.rs b/src/query/src/query_engine/default_serializer.rs index a7e5ff2ca1..6045415a9e 100644 --- a/src/query/src/query_engine/default_serializer.rs +++ b/src/query/src/query_engine/default_serializer.rs @@ -29,6 +29,11 @@ use datafusion::execution::{FunctionRegistry, SessionStateBuilder}; use datafusion::logical_expr::LogicalPlan; use datafusion_expr::UserDefinedLogicalNode; use greptime_proto::substrait_extension::MergeScan as PbMergeScan; +use promql::functions::{ + quantile_udaf, AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, Deriv, IDelta, + Increase, LastOverTime, MaxOverTime, MinOverTime, PresentOverTime, Rate, Resets, Round, + StddevOverTime, StdvarOverTime, SumOverTime, +}; use prost::Message; use session::context::QueryContextRef; use snafu::ResultExt; @@ -133,6 +138,28 @@ impl SubstraitPlanDecoder for DefaultPlanDecoder { let _ = session_state.register_udaf(Arc::new(HllState::state_udf_impl())); let _ = session_state.register_udaf(Arc::new(HllState::merge_udf_impl())); let _ = session_state.register_udaf(Arc::new(GeoPathAccumulator::udf_impl())); + let _ = session_state.register_udaf(quantile_udaf()); + + let _ = session_state.register_udf(Arc::new(IDelta::::scalar_udf())); + let _ = session_state.register_udf(Arc::new(IDelta::::scalar_udf())); + let _ = session_state.register_udf(Arc::new(Rate::scalar_udf())); + let _ = session_state.register_udf(Arc::new(Increase::scalar_udf())); + let _ = session_state.register_udf(Arc::new(Delta::scalar_udf())); + let _ = session_state.register_udf(Arc::new(Resets::scalar_udf())); + let _ = session_state.register_udf(Arc::new(Changes::scalar_udf())); + let _ = session_state.register_udf(Arc::new(Deriv::scalar_udf())); + let _ = session_state.register_udf(Arc::new(Round::scalar_udf())); + let _ = session_state.register_udf(Arc::new(AvgOverTime::scalar_udf())); + let _ = session_state.register_udf(Arc::new(MinOverTime::scalar_udf())); + let _ = session_state.register_udf(Arc::new(MaxOverTime::scalar_udf())); + let _ = session_state.register_udf(Arc::new(SumOverTime::scalar_udf())); + let _ = session_state.register_udf(Arc::new(CountOverTime::scalar_udf())); + let _ = session_state.register_udf(Arc::new(LastOverTime::scalar_udf())); + let _ = session_state.register_udf(Arc::new(AbsentOverTime::scalar_udf())); + let _ = session_state.register_udf(Arc::new(PresentOverTime::scalar_udf())); + let _ = session_state.register_udf(Arc::new(StddevOverTime::scalar_udf())); + let _ = session_state.register_udf(Arc::new(StdvarOverTime::scalar_udf())); + // TODO(ruihang): add quantile_over_time, predict_linear, holt_winters, round } let logical_plan = DFLogicalSubstraitConvertor .decode(message, session_state) diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs index d55ab471f9..7acd38aa37 100644 --- a/src/query/src/query_engine/state.rs +++ 
b/src/query/src/query_engine/state.rs @@ -31,6 +31,7 @@ use datafusion::error::Result as DfResult; use datafusion::execution::context::{QueryPlanner, SessionConfig, SessionContext, SessionState}; use datafusion::execution::runtime_env::RuntimeEnv; use datafusion::execution::SessionStateBuilder; +use datafusion::physical_optimizer::enforce_sorting::EnforceSorting; use datafusion::physical_optimizer::optimizer::PhysicalOptimizer; use datafusion::physical_optimizer::sanity_checker::SanityCheckPlan; use datafusion::physical_optimizer::PhysicalOptimizerRule; @@ -142,6 +143,9 @@ impl QueryEngineState { physical_optimizer .rules .insert(1, Arc::new(PassDistribution)); + physical_optimizer + .rules + .insert(2, Arc::new(EnforceSorting {})); // Add rule for windowed sort physical_optimizer .rules diff --git a/src/store-api/src/region_engine.rs b/src/store-api/src/region_engine.rs index 5f6069d961..fc27c2ff10 100644 --- a/src/store-api/src/region_engine.rs +++ b/src/store-api/src/region_engine.rs @@ -22,9 +22,8 @@ use std::sync::{Arc, Mutex}; use api::greptime_proto::v1::meta::{GrantedRegion as PbGrantedRegion, RegionRole as PbRegionRole}; use api::region::RegionResponse; use async_trait::async_trait; -use common_error::ext::{BoxedError, PlainError}; -use common_error::status_code::StatusCode; -use common_recordbatch::SendableRecordBatchStream; +use common_error::ext::BoxedError; +use common_recordbatch::{EmptyRecordBatchStream, SendableRecordBatchStream}; use common_time::Timestamp; use datafusion_physical_plan::metrics::ExecutionPlanMetricsSet; use datafusion_physical_plan::{DisplayAs, DisplayFormatType}; @@ -834,12 +833,10 @@ impl RegionScanner for SinglePartitionScanner { _partition: usize, ) -> Result { let mut stream = self.stream.lock().unwrap(); - stream.take().ok_or_else(|| { - BoxedError::new(PlainError::new( - "Not expected to run ExecutionPlan more than once".to_string(), - StatusCode::Unexpected, - )) - }) + let result = stream + .take() + .or_else(|| Some(Box::pin(EmptyRecordBatchStream::new(self.schema.clone())))); + Ok(result.unwrap()) } fn has_predicate(&self) -> bool { diff --git a/src/table/src/lib.rs b/src/table/src/lib.rs index 64e72029b8..5fb800ab3c 100644 --- a/src/table/src/lib.rs +++ b/src/table/src/lib.rs @@ -14,6 +14,7 @@ #![feature(assert_matches)] #![feature(try_blocks)] +#![feature(let_chains)] pub mod dist_table; pub mod error; diff --git a/src/table/src/table/scan.rs b/src/table/src/table/scan.rs index cd69b22b5a..4b41da9c05 100644 --- a/src/table/src/table/scan.rs +++ b/src/table/src/table/scan.rs @@ -82,11 +82,17 @@ impl RegionScanExec { if scanner.properties().is_logical_region() { pk_names.sort_unstable(); } - let mut pk_columns: Vec = pk_names - .into_iter() + let pk_columns = pk_names + .iter() + .filter_map( + |col| Some(Arc::new(Column::new_with_schema(col, &arrow_schema).ok()?) as _), + ) + .collect::>(); + let mut pk_sort_columns: Vec = pk_names + .iter() .filter_map(|col| { Some(PhysicalSortExpr::new( - Arc::new(Column::new_with_schema(&col, &arrow_schema).ok()?) as _, + Arc::new(Column::new_with_schema(col, &arrow_schema).ok()?) 
as _, SortOptions { descending: false, nulls_first: true, @@ -113,28 +119,37 @@ impl RegionScanExec { let eq_props = match request.distribution { Some(TimeSeriesDistribution::PerSeries) => { if let Some(ts) = ts_col { - pk_columns.push(ts); + pk_sort_columns.push(ts); } EquivalenceProperties::new_with_orderings( arrow_schema.clone(), - &[LexOrdering::new(pk_columns)], + &[LexOrdering::new(pk_sort_columns)], ) } Some(TimeSeriesDistribution::TimeWindowed) => { if let Some(ts_col) = ts_col { - pk_columns.insert(0, ts_col); + pk_sort_columns.insert(0, ts_col); } EquivalenceProperties::new_with_orderings( arrow_schema.clone(), - &[LexOrdering::new(pk_columns)], + &[LexOrdering::new(pk_sort_columns)], ) } None => EquivalenceProperties::new(arrow_schema.clone()), }; + let partitioning = match request.distribution { + Some(TimeSeriesDistribution::PerSeries) => { + Partitioning::Hash(pk_columns.clone(), num_output_partition) + } + Some(TimeSeriesDistribution::TimeWindowed) | None => { + Partitioning::UnknownPartitioning(num_output_partition) + } + }; + let properties = PlanProperties::new( eq_props, - Partitioning::UnknownPartitioning(num_output_partition), + partitioning, EmissionType::Incremental, Boundedness::Bounded, ); @@ -188,9 +203,14 @@ impl RegionScanExec { warn!("Setting partition ranges more than once for RegionScanExec"); } - let num_partitions = partitions.len(); let mut properties = self.properties.clone(); - properties.partitioning = Partitioning::UnknownPartitioning(num_partitions); + let new_partitioning = match properties.partitioning { + Partitioning::Hash(ref columns, _) => { + Partitioning::Hash(columns.clone(), target_partitions) + } + _ => Partitioning::UnknownPartitioning(target_partitions), + }; + properties.partitioning = new_partitioning; { let mut scanner = self.scanner.lock().unwrap(); @@ -480,12 +500,6 @@ mod test { assert_eq!(batch2.df_record_batch(), &recordbatches[1]); let result = plan.execute(0, ctx.task_ctx()); - assert!(result.is_err()); - match result { - Err(e) => assert!(e - .to_string() - .contains("Not expected to run ExecutionPlan more than once")), - _ => unreachable!(), - } + assert!(result.is_ok()); } } diff --git a/tests/cases/distributed/explain/analyze.result b/tests/cases/distributed/explain/analyze.result index 16b367e0c6..60e6b7e542 100644 --- a/tests/cases/distributed/explain/analyze.result +++ b/tests/cases/distributed/explain/analyze.result @@ -44,7 +44,6 @@ explain analyze SELECT count(*) FROM system_metrics; |_|_|_AggregateExec: mode=Final, gby=[], aggr=[count(system_REDACTED |_|_|_CoalescePartitionsExec REDACTED |_|_|_AggregateExec: mode=Partial, gby=[], aggr=[count(system_REDACTED -|_|_|_RepartitionExec: partitioning=REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| |_|_| Total rows: 1_| diff --git a/tests/cases/distributed/explain/order_by.result b/tests/cases/distributed/explain/order_by.result index 9f0f5002b1..26e566a776 100644 --- a/tests/cases/distributed/explain/order_by.result +++ b/tests/cases/distributed/explain/order_by.result @@ -98,6 +98,7 @@ SELECT i, t AS alias_ts FROM test_pk ORDER BY alias_ts DESC LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY t DESC LIMIT 5; +-+-+-+ @@ -106,9 +107,9 @@ EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY t 
DESC LIMIT 5; | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_ProjectionExec: expr=[i@0 as i, t@1 as alias_ts] REDACTED -|_|_|_SortPreservingMergeExec: [test_pk.t__temp__0@2 DESC] REDACTED -|_|_|_WindowedSortExec: expr=test_pk.t__temp__0@2 DESC num_ranges=1 fetch=5 REDACTED -|_|_|_PartSortExec: expr=test_pk.t__temp__0@2 DESC num_ranges=1 limit=5 REDACTED +|_|_|_SortPreservingMergeExec: [test_pk.t__temp__0@2 DESC], fetch=5 REDACTED +|_|_|_WindowedSortExec: expr=test_pk.t__temp__0@2 DESC num_ranges=REDACTED fetch=5 REDACTED +|_|_|_PartSortExec: expr=test_pk.t__temp__0@2 DESC num_ranges=REDACTED limit=5 REDACTED |_|_|_ProjectionExec: expr=[i@0 as i, t@1 as t, t@1 as test_pk.t__temp__0] REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| @@ -120,6 +121,7 @@ EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY t DESC LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY alias_ts DESC LIMIT 5; +-+-+-+ @@ -128,9 +130,9 @@ EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY alias_ts DESC LIMI | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_ProjectionExec: expr=[i@0 as i, t@1 as alias_ts] REDACTED -|_|_|_SortPreservingMergeExec: [t@1 DESC] REDACTED -|_|_|_WindowedSortExec: expr=t@1 DESC num_ranges=1 fetch=5 REDACTED -|_|_|_PartSortExec: expr=t@1 DESC num_ranges=1 limit=5 REDACTED +|_|_|_SortPreservingMergeExec: [t@1 DESC], fetch=5 REDACTED +|_|_|_WindowedSortExec: expr=t@1 DESC num_ranges=REDACTED fetch=5 REDACTED +|_|_|_PartSortExec: expr=t@1 DESC num_ranges=REDACTED limit=5 REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| |_|_| Total rows: 5_| diff --git a/tests/cases/distributed/explain/order_by.sql b/tests/cases/distributed/explain/order_by.sql index 4c2f505697..690afe8da4 100644 --- a/tests/cases/distributed/explain/order_by.sql +++ b/tests/cases/distributed/explain/order_by.sql @@ -43,6 +43,7 @@ SELECT i, t AS alias_ts FROM test_pk ORDER BY alias_ts DESC LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY t DESC LIMIT 5; -- SQLNESS REPLACE (-+) - @@ -50,6 +51,7 @@ EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY t DESC LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY alias_ts DESC LIMIT 5; DROP TABLE test_pk; diff --git a/tests/cases/distributed/optimizer/windowed_sort.result b/tests/cases/distributed/optimizer/windowed_sort.result index 42562b94aa..2ae30e8959 100644 --- a/tests/cases/distributed/optimizer/windowed_sort.result +++ b/tests/cases/distributed/optimizer/windowed_sort.result @@ -70,7 +70,8 @@ ORDER BY | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_ProjectionExec: expr=[collect_time_utc@0 as collect_time, peak_current@1 as peak_current] REDACTED -|_|_|_SortExec: expr=[collect_time_utc@0 ASC NULLS LAST], preserve_partitioning=[false] REDACTED +|_|_|_SortPreservingMergeExec: 
[collect_time_utc@0 ASC NULLS LAST] REDACTED +|_|_|_SortExec: expr=[collect_time_utc@0 ASC NULLS LAST], preserve_partitioning=[true] REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| |_|_| Total rows: 8_| @@ -103,6 +104,7 @@ ORDER BY -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT collect_time_utc AS collect_time_0, peak_current, @@ -117,7 +119,8 @@ ORDER BY | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_ProjectionExec: expr=[collect_time_utc@0 as collect_time_0, peak_current@1 as peak_current] REDACTED -|_|_|_SortExec: expr=[collect_time_utc@0 ASC NULLS LAST], preserve_partitioning=[false] REDACTED +|_|_|_SortPreservingMergeExec: [collect_time_utc@0 ASC NULLS LAST] REDACTED +|_|_|_SortExec: expr=[collect_time_utc@0 ASC NULLS LAST], preserve_partitioning=[true] REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| |_|_| Total rows: 8_| @@ -151,6 +154,7 @@ ORDER BY -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT collect_time AS true_collect_time, collect_time_utc AS collect_time, @@ -167,8 +171,8 @@ ORDER BY |_|_|_| | 1_| 0_|_ProjectionExec: expr=[collect_time@0 as true_collect_time, collect_time_utc@1 as collect_time, peak_current@2 as peak_current] REDACTED |_|_|_SortPreservingMergeExec: [collect_time@0 DESC] REDACTED -|_|_|_WindowedSortExec: expr=collect_time@0 DESC num_ranges=1 REDACTED -|_|_|_PartSortExec: expr=collect_time@0 DESC num_ranges=1 REDACTED +|_|_|_WindowedSortExec: expr=collect_time@0 DESC num_ranges=REDACTED REDACTED +|_|_|_PartSortExec: expr=collect_time@0 DESC num_ranges=REDACTED REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| |_|_| Total rows: 8_| @@ -202,6 +206,7 @@ ORDER BY -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT collect_time_utc AS collect_time, collect_time AS true_collect_time, @@ -218,8 +223,8 @@ ORDER BY |_|_|_| | 1_| 0_|_ProjectionExec: expr=[collect_time_utc@0 as collect_time, collect_time@1 as true_collect_time, peak_current@2 as peak_current] REDACTED |_|_|_SortPreservingMergeExec: [collect_time@1 DESC] REDACTED -|_|_|_WindowedSortExec: expr=collect_time@1 DESC num_ranges=1 REDACTED -|_|_|_PartSortExec: expr=collect_time@1 DESC num_ranges=1 REDACTED +|_|_|_WindowedSortExec: expr=collect_time@1 DESC num_ranges=REDACTED REDACTED +|_|_|_PartSortExec: expr=collect_time@1 DESC num_ranges=REDACTED REDACTED |_|_|_ProjectionExec: expr=[collect_time_utc@1 as collect_time_utc, collect_time@0 as collect_time, peak_current@2 as peak_current] REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| diff --git a/tests/cases/distributed/optimizer/windowed_sort.sql b/tests/cases/distributed/optimizer/windowed_sort.sql index 72d3464af7..eefdacd51a 100644 --- a/tests/cases/distributed/optimizer/windowed_sort.sql +++ b/tests/cases/distributed/optimizer/windowed_sort.sql @@ -61,6 +61,7 @@ ORDER BY -- SQLNESS REPLACE (peers.*) 
REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT collect_time_utc AS collect_time_0, peak_current, @@ -84,6 +85,7 @@ ORDER BY -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT collect_time AS true_collect_time, collect_time_utc AS collect_time, @@ -108,6 +110,7 @@ ORDER BY -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT collect_time_utc AS collect_time, collect_time AS true_collect_time, diff --git a/tests/cases/distributed/tql-explain-analyze/analyze.result b/tests/cases/distributed/tql-explain-analyze/analyze.result new file mode 100644 index 0000000000..61e09e1d33 --- /dev/null +++ b/tests/cases/distributed/tql-explain-analyze/analyze.result @@ -0,0 +1,171 @@ +CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY); + +Affected Rows: 0 + +-- insert two points at 1ms and one point at 2ms +INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a"); + +Affected Rows: 3 + +-- analyze at 0s, 5s and 10s. No point at 0s. +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +TQL ANALYZE (0, 10, '5s') test; + ++-+-+-+ +| stage | node | plan_| ++-+-+-+ +| 0_| 0_|_MergeScanExec: REDACTED +|_|_|_| +| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +|_|_|_| +|_|_| Total rows: 4_| ++-+-+-+ + +-- 'lookback' parameter is not fully supported, the test has to be updated +-- analyze at 0s, 5s and 10s. No point at 0s. +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +TQL ANALYZE (0, 10, '1s', '2s') test; + ++-+-+-+ +| stage | node | plan_| ++-+-+-+ +| 0_| 0_|_MergeScanExec: REDACTED +|_|_|_| +| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[2000], interval=[1000], time index=[j] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +|_|_|_| +|_|_| Total rows: 4_| ++-+-+-+ + +-- analyze at 0s, 5s and 10s. No point at 0s. 
+-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp + '10 seconds'::interval, '5s') test; + ++-+-+-+ +| stage | node | plan_| ++-+-+-+ +| 0_| 0_|_MergeScanExec: REDACTED +|_|_|_| +| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +|_|_|_| +|_|_| Total rows: 4_| ++-+-+-+ + +-- analyze verbose at 0s, 5s and 10s. No point at 0s. +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (elapsed_compute.*) REDACTED +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (Duration.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +TQL ANALYZE VERBOSE (0, 10, '5s') test; + ++-+-+-+ +| stage | node | plan_| ++-+-+-+ +| 0_| 0_|_MergeScanExec: REDACTED +|_|_|_| +| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries, projection=["i", "j", "k"], filters=[j >= TimestampMillisecond(-300000, None), j <= TimestampMillisecond(310000, None)], REDACTED +|_|_|_| +|_|_| Total rows: 4_| ++-+-+-+ + +DROP TABLE test; + +Affected Rows: 0 + +-- partition table +CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING, l STRING, PRIMARY KEY(k, l)) PARTITION ON COLUMNS (k) (k < 'a', k >= 'a'); + +Affected Rows: 0 + +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +TQL ANALYZE (0, 10, '5s') test; + ++-+-+-+ +| stage | node | plan_| ++-+-+-+ +| 0_| 0_|_SortPreservingMergeExec: [k@2 ASC, l@3 ASC, j@1 ASC] REDACTED +|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC, j@1 ASC], preserve_partitioning=[true] REDACTED +|_|_|_MergeScanExec: REDACTED +|_|_|_| +| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +|_|_|_| +| 1_| 1_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +|_|_|_| +|_|_| Total rows: 0_| ++-+-+-+ + +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +TQL ANALYZE (0, 10, '5s') rate(test[10s]); + ++-+-+-+ +| stage | node | plan_| ++-+-+-+ +| 0_| 0_|_SortPreservingMergeExec: [k@2 ASC, l@3 ASC, j@0 ASC] REDACTED 
+|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC, j@0 ASC], preserve_partitioning=[true] REDACTED +|_|_|_MergeScanExec: REDACTED +|_|_|_| +| 1_| 0_|_ProjectionExec: expr=[j@0 as j, prom_rate(j_range,i,test.j,Int64(10000))@1 as prom_rate(j_range,i,j,Int64(10000)), k@2 as k, l@3 as l] REDACTED +|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED +|_|_|_FilterExec: prom_rate(j_range,i,test.j,Int64(10000))@1 IS NOT NULL REDACTED +|_|_|_ProjectionExec: expr=[j@1 as j, prom_rate(j_range@4, i@0, j@1, 10000) as prom_rate(j_range,i,test.j,Int64(10000)), k@2 as k, l@3 as l] REDACTED +|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[5000], eval range=[10000], time index=[j] REDACTED +|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +|_|_|_| +| 1_| 1_|_ProjectionExec: expr=[j@0 as j, prom_rate(j_range,i,test.j,Int64(10000))@1 as prom_rate(j_range,i,j,Int64(10000)), k@2 as k, l@3 as l] REDACTED +|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED +|_|_|_FilterExec: prom_rate(j_range,i,test.j,Int64(10000))@1 IS NOT NULL REDACTED +|_|_|_ProjectionExec: expr=[j@1 as j, prom_rate(j_range@4, i@0, j@1, 10000) as prom_rate(j_range,i,test.j,Int64(10000)), k@2 as k, l@3 as l] REDACTED +|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[5000], eval range=[10000], time index=[j] REDACTED +|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +|_|_|_| +|_|_| Total rows: 0_| ++-+-+-+ + +drop table test; + +Affected Rows: 0 + diff --git a/tests/cases/standalone/common/tql-explain-analyze/analyze.sql b/tests/cases/distributed/tql-explain-analyze/analyze.sql similarity index 100% rename from tests/cases/standalone/common/tql-explain-analyze/analyze.sql rename to tests/cases/distributed/tql-explain-analyze/analyze.sql diff --git a/tests/cases/standalone/common/aggregate/distinct.result b/tests/cases/standalone/common/aggregate/distinct.result index 4130d26f71..72326b586b 100644 --- a/tests/cases/standalone/common/aggregate/distinct.result +++ b/tests/cases/standalone/common/aggregate/distinct.result @@ -98,7 +98,6 @@ EXPLAIN ANALYZE SELECT DISTINCT a FROM test ORDER BY a; |_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED |_|_|_RepartitionExec: partitioning=REDACTED |_|_|_AggregateExec: mode=Partial, gby=[a@0 as a], aggr=[] REDACTED -|_|_|_RepartitionExec: partitioning=REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| |_|_| Total rows: 2_| diff --git a/tests/cases/standalone/common/aggregate/multi_regions.result b/tests/cases/standalone/common/aggregate/multi_regions.result index ff72d4c2f9..6c73b89891 100644 --- a/tests/cases/standalone/common/aggregate/multi_regions.result +++ b/tests/cases/standalone/common/aggregate/multi_regions.result @@ -32,7 +32,6 @@ select sum(val) from t group by host; |_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED |_|_|_RepartitionExec: partitioning=REDACTED |_|_|_AggregateExec: mode=Partial, gby=[host@1 as host], aggr=[sum(t.val)] REDACTED -|_|_|_RepartitionExec: partitioning=REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 
ranges) REDACTED |_|_|_| | 1_| 1_|_ProjectionExec: expr=[sum(t.val)@1 as sum(t.val)] REDACTED @@ -40,7 +39,6 @@ select sum(val) from t group by host; |_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED |_|_|_RepartitionExec: partitioning=REDACTED |_|_|_AggregateExec: mode=Partial, gby=[host@1 as host], aggr=[sum(t.val)] REDACTED -|_|_|_RepartitionExec: partitioning=REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| |_|_| Total rows: 0_| diff --git a/tests/cases/standalone/common/cte/cte.result b/tests/cases/standalone/common/cte/cte.result index 48b0dfd4f1..0a796b68b7 100644 --- a/tests/cases/standalone/common/cte/cte.result +++ b/tests/cases/standalone/common/cte/cte.result @@ -46,6 +46,7 @@ with cte1 as (Select i as j from a), cte2 as (select ref.j as k from cte1 as ref | 42 | 43 | +----+----+ +-- SQLNESS SORT_RESULT 3 1 with cte1 as (select i as j from a), cte2 as (select ref.j as k from cte1 as ref), cte3 as (select ref2.j+1 as i from cte1 as ref2) select * from cte2 union all select * FROM cte3 order by 1; +----+ diff --git a/tests/cases/standalone/common/cte/cte.sql b/tests/cases/standalone/common/cte/cte.sql index b3eda58fb5..ebd517e31b 100644 --- a/tests/cases/standalone/common/cte/cte.sql +++ b/tests/cases/standalone/common/cte/cte.sql @@ -12,6 +12,7 @@ with cte1(xxx) as (Select i as j from a) select x from cte1 t1(x); with cte1 as (Select i as j from a), cte2 as (select ref.j as k from cte1 as ref), cte3 as (select ref2.j+1 as i from cte1 as ref2) select * from cte2 , cte3; +-- SQLNESS SORT_RESULT 3 1 with cte1 as (select i as j from a), cte2 as (select ref.j as k from cte1 as ref), cte3 as (select ref2.j+1 as i from cte1 as ref2) select * from cte2 union all select * FROM cte3 order by 1; with cte1 as (select 42), cte1 as (select 42) select * FROM cte1; diff --git a/tests/cases/standalone/common/order/order_by.result b/tests/cases/standalone/common/order/order_by.result index c586b15879..f125d23fe6 100644 --- a/tests/cases/standalone/common/order/order_by.result +++ b/tests/cases/standalone/common/order/order_by.result @@ -287,6 +287,7 @@ select tag from t where num > 6 order by ts; -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED -- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED explain analyze select tag from t where num > 6 order by ts desc limit 2; +-+-+-+ @@ -298,14 +299,14 @@ explain analyze select tag from t where num > 6 order by ts desc limit 2; |_|_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_SortPreservingMergeExec: [ts@1 DESC], fetch=2 REDACTED -|_|_|_WindowedSortExec: expr=ts@1 DESC num_ranges=1 fetch=2 REDACTED -|_|_|_PartSortExec: expr=ts@1 DESC num_ranges=1 limit=2 REDACTED +|_|_|_WindowedSortExec: expr=ts@1 DESC num_ranges=REDACTED fetch=2 REDACTED +|_|_|_PartSortExec: expr=ts@1 DESC num_ranges=REDACTED limit=2 REDACTED |_|_|_FilterExec: num@2 > 6, projection=[tag@0, ts@1] REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| | 1_| 1_|_SortPreservingMergeExec: [ts@1 DESC], fetch=2 REDACTED -|_|_|_WindowedSortExec: expr=ts@1 DESC num_ranges=1 fetch=2 REDACTED -|_|_|_PartSortExec: expr=ts@1 DESC num_ranges=1 limit=2 REDACTED +|_|_|_WindowedSortExec: expr=ts@1 DESC num_ranges=REDACTED fetch=2 REDACTED +|_|_|_PartSortExec: expr=ts@1 DESC num_ranges=REDACTED limit=2 REDACTED |_|_|_FilterExec: num@2 > 6, projection=[tag@0, ts@1] REDACTED 
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| diff --git a/tests/cases/standalone/common/order/order_by.sql b/tests/cases/standalone/common/order/order_by.sql index 2461cf5a13..dd641613d9 100644 --- a/tests/cases/standalone/common/order/order_by.sql +++ b/tests/cases/standalone/common/order/order_by.sql @@ -94,6 +94,7 @@ select tag from t where num > 6 order by ts; -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED -- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED explain analyze select tag from t where num > 6 order by ts desc limit 2; drop table t; diff --git a/tests/cases/standalone/common/order/windowed_sort.result b/tests/cases/standalone/common/order/windowed_sort.result index 268ecff52b..1509e25bea 100644 --- a/tests/cases/standalone/common/order/windowed_sort.result +++ b/tests/cases/standalone/common/order/windowed_sort.result @@ -60,6 +60,7 @@ SELECT * FROM test ORDER BY t LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test ORDER BY t LIMIT 5; +-+-+-+ @@ -68,7 +69,7 @@ EXPLAIN ANALYZE SELECT * FROM test ORDER BY t LIMIT 5; | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_SortPreservingMergeExec: [t@1 ASC NULLS LAST], fetch=5 REDACTED -|_|_|_WindowedSortExec: expr=t@1 ASC NULLS LAST num_ranges=4 fetch=5 REDACTED +|_|_|_WindowedSortExec: expr=t@1 ASC NULLS LAST num_ranges=REDACTED fetch=5 REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=4 (1 memtable ranges, 3 file 3 ranges) REDACTED |_|_|_| |_|_| Total rows: 5_| @@ -91,6 +92,7 @@ SELECT * FROM test ORDER BY t DESC LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test ORDER BY t DESC LIMIT 5; +-+-+-+ @@ -99,8 +101,8 @@ EXPLAIN ANALYZE SELECT * FROM test ORDER BY t DESC LIMIT 5; | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_SortPreservingMergeExec: [t@1 DESC], fetch=5 REDACTED -|_|_|_WindowedSortExec: expr=t@1 DESC num_ranges=4 fetch=5 REDACTED -|_|_|_PartSortExec: expr=t@1 DESC num_ranges=4 limit=5 REDACTED +|_|_|_WindowedSortExec: expr=t@1 DESC num_ranges=REDACTED fetch=5 REDACTED +|_|_|_PartSortExec: expr=t@1 DESC num_ranges=REDACTED limit=5 REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=4 (1 memtable ranges, 3 file 3 ranges) REDACTED |_|_|_| |_|_| Total rows: 5_| @@ -123,6 +125,7 @@ SELECT * FROM test where i > 2 ORDER BY t LIMIT 4; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test where i > 2 ORDER BY t LIMIT 4; +-+-+-+ @@ -131,7 +134,7 @@ EXPLAIN ANALYZE SELECT * FROM test where i > 2 ORDER BY t LIMIT 4; | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_SortPreservingMergeExec: [t@1 ASC NULLS LAST], fetch=4 REDACTED -|_|_|_WindowedSortExec: expr=t@1 ASC NULLS LAST num_ranges=4 fetch=4 REDACTED +|_|_|_WindowedSortExec: expr=t@1 ASC NULLS LAST num_ranges=REDACTED fetch=4 REDACTED |_|_|_FilterExec: i@0 > 2 REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=4 (1 memtable ranges, 3 file 3 ranges) REDACTED 
|_|_|_| @@ -155,6 +158,7 @@ SELECT * FROM test where i > 2 ORDER BY t DESC LIMIT 4; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test where i > 2 ORDER BY t DESC LIMIT 4; +-+-+-+ @@ -163,8 +167,8 @@ EXPLAIN ANALYZE SELECT * FROM test where i > 2 ORDER BY t DESC LIMIT 4; | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_SortPreservingMergeExec: [t@1 DESC], fetch=4 REDACTED -|_|_|_WindowedSortExec: expr=t@1 DESC num_ranges=4 fetch=4 REDACTED -|_|_|_PartSortExec: expr=t@1 DESC num_ranges=4 limit=4 REDACTED +|_|_|_WindowedSortExec: expr=t@1 DESC num_ranges=REDACTED fetch=4 REDACTED +|_|_|_PartSortExec: expr=t@1 DESC num_ranges=REDACTED limit=4 REDACTED |_|_|_FilterExec: i@0 > 2 REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=4 (1 memtable ranges, 3 file 3 ranges) REDACTED |_|_|_| @@ -188,6 +192,7 @@ SELECT * FROM test where t > 8 ORDER BY t DESC LIMIT 4; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test where t > 8 ORDER BY t DESC LIMIT 4; +-+-+-+ @@ -196,8 +201,8 @@ EXPLAIN ANALYZE SELECT * FROM test where t > 8 ORDER BY t DESC LIMIT 4; | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_SortPreservingMergeExec: [t@1 DESC], fetch=4 REDACTED -|_|_|_WindowedSortExec: expr=t@1 DESC num_ranges=2 fetch=4 REDACTED -|_|_|_PartSortExec: expr=t@1 DESC num_ranges=2 limit=4 REDACTED +|_|_|_WindowedSortExec: expr=t@1 DESC num_ranges=REDACTED fetch=4 REDACTED +|_|_|_PartSortExec: expr=t@1 DESC num_ranges=REDACTED limit=4 REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=2 (1 memtable ranges, 1 file 1 ranges) REDACTED |_|_|_| |_|_| Total rows: 4_| @@ -269,6 +274,7 @@ SELECT * FROM test_pk ORDER BY t LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test_pk ORDER BY t LIMIT 5; +-+-+-+ @@ -277,8 +283,8 @@ EXPLAIN ANALYZE SELECT * FROM test_pk ORDER BY t LIMIT 5; | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_SortPreservingMergeExec: [t@2 ASC NULLS LAST], fetch=5 REDACTED -|_|_|_WindowedSortExec: expr=t@2 ASC NULLS LAST num_ranges=4 fetch=5 REDACTED -|_|_|_PartSortExec: expr=t@2 ASC NULLS LAST num_ranges=4 limit=5 REDACTED +|_|_|_WindowedSortExec: expr=t@2 ASC NULLS LAST num_ranges=REDACTED fetch=5 REDACTED +|_|_|_PartSortExec: expr=t@2 ASC NULLS LAST num_ranges=REDACTED limit=5 REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=4 (1 memtable ranges, 3 file 3 ranges) REDACTED |_|_|_| |_|_| Total rows: 5_| @@ -290,6 +296,7 @@ EXPLAIN ANALYZE SELECT * FROM test_pk ORDER BY t LIMIT 5; -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED -- SQLNESS REPLACE (files.*) REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE VERBOSE SELECT * FROM test_pk ORDER BY t LIMIT 5; +-+-+-+ @@ -298,8 +305,8 @@ EXPLAIN ANALYZE VERBOSE SELECT * FROM test_pk ORDER BY t LIMIT 5; | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_SortPreservingMergeExec: [t@2 ASC NULLS LAST], fetch=5 REDACTED -|_|_|_WindowedSortExec: expr=t@2 ASC NULLS LAST num_ranges=4 fetch=5 REDACTED -|_|_|_PartSortExec: expr=t@2 ASC 
NULLS LAST num_ranges=4 limit=5 REDACTED +|_|_|_WindowedSortExec: expr=t@2 ASC NULLS LAST num_ranges=REDACTED fetch=5 REDACTED +|_|_|_PartSortExec: expr=t@2 ASC NULLS LAST num_ranges=REDACTED limit=5 REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=4 (1 memtable ranges, 3 file 3 ranges), projection=["pk", "i", "t"], REDACTED |_|_|_| |_|_| Total rows: 5_| @@ -322,6 +329,7 @@ SELECT * FROM test_pk ORDER BY t DESC LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test_pk ORDER BY t DESC LIMIT 5; +-+-+-+ @@ -330,8 +338,8 @@ EXPLAIN ANALYZE SELECT * FROM test_pk ORDER BY t DESC LIMIT 5; | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_SortPreservingMergeExec: [t@2 DESC], fetch=5 REDACTED -|_|_|_WindowedSortExec: expr=t@2 DESC num_ranges=4 fetch=5 REDACTED -|_|_|_PartSortExec: expr=t@2 DESC num_ranges=4 limit=5 REDACTED +|_|_|_WindowedSortExec: expr=t@2 DESC num_ranges=REDACTED fetch=5 REDACTED +|_|_|_PartSortExec: expr=t@2 DESC num_ranges=REDACTED limit=5 REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=4 (1 memtable ranges, 3 file 3 ranges) REDACTED |_|_|_| |_|_| Total rows: 5_| @@ -355,6 +363,7 @@ SELECT * FROM test_pk where pk > 7 ORDER BY t LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test_pk where pk > 7 ORDER BY t LIMIT 5; +-+-+-+ @@ -363,8 +372,8 @@ EXPLAIN ANALYZE SELECT * FROM test_pk where pk > 7 ORDER BY t LIMIT 5; | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_SortPreservingMergeExec: [t@2 ASC NULLS LAST], fetch=5 REDACTED -|_|_|_WindowedSortExec: expr=t@2 ASC NULLS LAST num_ranges=4 fetch=5 REDACTED -|_|_|_PartSortExec: expr=t@2 ASC NULLS LAST num_ranges=4 limit=5 REDACTED +|_|_|_WindowedSortExec: expr=t@2 ASC NULLS LAST num_ranges=REDACTED fetch=5 REDACTED +|_|_|_PartSortExec: expr=t@2 ASC NULLS LAST num_ranges=REDACTED limit=5 REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=4 (1 memtable ranges, 3 file 3 ranges) REDACTED |_|_|_| |_|_| Total rows: 5_| @@ -376,6 +385,7 @@ EXPLAIN ANALYZE SELECT * FROM test_pk where pk > 7 ORDER BY t LIMIT 5; -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED -- SQLNESS REPLACE (files.*) REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE VERBOSE SELECT * FROM test_pk where pk > 7 ORDER BY t LIMIT 5; +-+-+-+ @@ -384,8 +394,8 @@ EXPLAIN ANALYZE VERBOSE SELECT * FROM test_pk where pk > 7 ORDER BY t LIMIT 5; | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_SortPreservingMergeExec: [t@2 ASC NULLS LAST], fetch=5 REDACTED -|_|_|_WindowedSortExec: expr=t@2 ASC NULLS LAST num_ranges=4 fetch=5 REDACTED -|_|_|_PartSortExec: expr=t@2 ASC NULLS LAST num_ranges=4 limit=5 REDACTED +|_|_|_WindowedSortExec: expr=t@2 ASC NULLS LAST num_ranges=REDACTED fetch=5 REDACTED +|_|_|_PartSortExec: expr=t@2 ASC NULLS LAST num_ranges=REDACTED limit=5 REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=4 (1 memtable ranges, 3 file 3 ranges), projection=["pk", "i", "t"], filters=[pk > Int32(7)], REDACTED |_|_|_| |_|_| Total rows: 5_| diff --git a/tests/cases/standalone/common/order/windowed_sort.sql b/tests/cases/standalone/common/order/windowed_sort.sql index 6c70764d7c..9e0e24c38d 100644 --- 
a/tests/cases/standalone/common/order/windowed_sort.sql +++ b/tests/cases/standalone/common/order/windowed_sort.sql @@ -22,6 +22,7 @@ SELECT * FROM test ORDER BY t LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test ORDER BY t LIMIT 5; SELECT * FROM test ORDER BY t DESC LIMIT 5; @@ -31,6 +32,7 @@ SELECT * FROM test ORDER BY t DESC LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test ORDER BY t DESC LIMIT 5; -- Filter on a field. @@ -41,6 +43,7 @@ SELECT * FROM test where i > 2 ORDER BY t LIMIT 4; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test where i > 2 ORDER BY t LIMIT 4; -- Filter on a field. @@ -51,6 +54,7 @@ SELECT * FROM test where i > 2 ORDER BY t DESC LIMIT 4; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test where i > 2 ORDER BY t DESC LIMIT 4; -- Filter on the time index. @@ -61,6 +65,7 @@ SELECT * FROM test where t > 8 ORDER BY t DESC LIMIT 4; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test where t > 8 ORDER BY t DESC LIMIT 4; DROP TABLE test; @@ -89,6 +94,7 @@ SELECT * FROM test_pk ORDER BY t LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test_pk ORDER BY t LIMIT 5; -- SQLNESS REPLACE (-+) - @@ -97,6 +103,7 @@ EXPLAIN ANALYZE SELECT * FROM test_pk ORDER BY t LIMIT 5; -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED -- SQLNESS REPLACE (files.*) REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE VERBOSE SELECT * FROM test_pk ORDER BY t LIMIT 5; SELECT * FROM test_pk ORDER BY t DESC LIMIT 5; @@ -106,6 +113,7 @@ SELECT * FROM test_pk ORDER BY t DESC LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test_pk ORDER BY t DESC LIMIT 5; -- Filter on a pk column. 
@@ -116,6 +124,7 @@ SELECT * FROM test_pk where pk > 7 ORDER BY t LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT * FROM test_pk where pk > 7 ORDER BY t LIMIT 5; -- SQLNESS REPLACE (-+) - @@ -124,6 +133,7 @@ EXPLAIN ANALYZE SELECT * FROM test_pk where pk > 7 ORDER BY t LIMIT 5; -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED -- SQLNESS REPLACE (files.*) REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE VERBOSE SELECT * FROM test_pk where pk > 7 ORDER BY t LIMIT 5; DROP TABLE test_pk; diff --git a/tests/cases/standalone/common/select/skipping_index.result b/tests/cases/standalone/common/select/skipping_index.result index 20b3c40934..e8e7deac14 100644 --- a/tests/cases/standalone/common/select/skipping_index.result +++ b/tests/cases/standalone/common/select/skipping_index.result @@ -62,7 +62,6 @@ EXPLAIN ANALYZE SELECT * FROM skipping_table WHERE id = 'id2' ORDER BY `name`; |_|_|_SortExec: expr=[name@2 ASC NULLS LAST], preserve_partitioning=[true] REDACTED |_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED |_|_|_FilterExec: id@1 = id2 REDACTED -|_|_|_RepartitionExec: partitioning=REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=3 (0 memtable ranges, 3 file 3 ranges) REDACTED |_|_|_| |_|_| Total rows: 1_| @@ -85,7 +84,6 @@ EXPLAIN ANALYZE SELECT * FROM skipping_table WHERE id = 'id5' ORDER BY `name`; |_|_|_SortExec: expr=[name@2 ASC NULLS LAST], preserve_partitioning=[true] REDACTED |_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED |_|_|_FilterExec: id@1 = id5 REDACTED -|_|_|_RepartitionExec: partitioning=REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=3 (0 memtable ranges, 3 file 3 ranges) REDACTED |_|_|_| |_|_| Total rows: 0_| diff --git a/tests/cases/standalone/common/select/tql_filter.result b/tests/cases/standalone/common/select/tql_filter.result index 5c10d77a52..9e4fe5fdf9 100644 --- a/tests/cases/standalone/common/select/tql_filter.result +++ b/tests/cases/standalone/common/select/tql_filter.result @@ -17,11 +17,11 @@ tql analyze (1, 3, '1s') t1{ a = "a" }; +-+-+-+ | stage | node | plan_| +-+-+-+ -| 0_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED -|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED -|_|_|_MergeScanExec: REDACTED +| 0_| 0_|_MergeScanExec: REDACTED |_|_|_| -| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +| 1_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED +|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED |_|_|_| |_|_| Total rows: 3_| +-+-+-+ @@ -37,11 +37,11 @@ tql analyze (1, 3, '1s') t1{ a =~ ".*" }; +-+-+-+ | stage | node | plan_| +-+-+-+ -| 0_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED -|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED -|_|_|_MergeScanExec: REDACTED +| 0_| 0_|_MergeScanExec: REDACTED |_|_|_| -| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +| 1_| 0_|_PromInstantManipulateExec: range=[1000..3000], 
lookback=[300000], interval=[1000], time index=[b] REDACTED +|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED |_|_|_| |_|_| Total rows: 6_| +-+-+-+ @@ -57,11 +57,11 @@ tql analyze (1, 3, '1s') t1{ a =~ "a.*" }; +-+-+-+ | stage | node | plan_| +-+-+-+ -| 0_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED -|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED -|_|_|_MergeScanExec: REDACTED +| 0_| 0_|_MergeScanExec: REDACTED |_|_|_| -| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +| 1_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED +|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED |_|_|_| |_|_| Total rows: 3_| +-+-+-+ diff --git a/tests/cases/standalone/common/tql-explain-analyze/explain.result b/tests/cases/standalone/common/tql-explain-analyze/explain.result index 200ec5c814..bc3ebc0b35 100644 --- a/tests/cases/standalone/common/tql-explain-analyze/explain.result +++ b/tests/cases/standalone/common/tql-explain-analyze/explain.result @@ -12,18 +12,13 @@ Affected Rows: 3 -- SQLNESS REPLACE (peers.*) REDACTED TQL EXPLAIN (0, 10, '5s') test; -+---------------+-----------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+-----------------------------------------------------------------------------------------------+ -| logical_plan | PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j] | -| | PromSeriesDivide: tags=["k"] | -| | Projection: test.i, test.j, test.k | -| | MergeScan [is_placeholder=false] | -| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j] | -| | PromSeriesDivideExec: tags=["k"] | -| | MergeScanExec: REDACTED -| | | -+---------------+-----------------------------------------------------------------------------------------------+ ++---------------+-------------------------------------------------+ +| plan_type | plan | ++---------------+-------------------------------------------------+ +| logical_plan | MergeScan [is_placeholder=false] | +| physical_plan | MergeScanExec: REDACTED +| | | ++---------------+-------------------------------------------------+ -- 'lookback' parameter is not fully supported, the test has to be updated -- explain at 0s, 5s and 10s. No point at 0s. 
@@ -31,36 +26,26 @@ TQL EXPLAIN (0, 10, '5s') test; -- SQLNESS REPLACE (peers.*) REDACTED TQL EXPLAIN (0, 10, '1s', '2s') test; -+---------------+---------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+---------------------------------------------------------------------------------------------+ -| logical_plan | PromInstantManipulate: range=[0..0], lookback=[2000], interval=[300000], time index=[j] | -| | PromSeriesDivide: tags=["k"] | -| | Projection: test.i, test.j, test.k | -| | MergeScan [is_placeholder=false] | -| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[2000], interval=[300000], time index=[j] | -| | PromSeriesDivideExec: tags=["k"] | -| | MergeScanExec: REDACTED -| | | -+---------------+---------------------------------------------------------------------------------------------+ ++---------------+-------------------------------------------------+ +| plan_type | plan | ++---------------+-------------------------------------------------+ +| logical_plan | MergeScan [is_placeholder=false] | +| physical_plan | MergeScanExec: REDACTED +| | | ++---------------+-------------------------------------------------+ -- explain at 0s, 5s and 10s. No point at 0s. -- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED -- SQLNESS REPLACE (peers.*) REDACTED TQL EXPLAIN ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp + '10 seconds'::interval, '5s') test; -+---------------+-----------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+-----------------------------------------------------------------------------------------------+ -| logical_plan | PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j] | -| | PromSeriesDivide: tags=["k"] | -| | Projection: test.i, test.j, test.k | -| | MergeScan [is_placeholder=false] | -| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j] | -| | PromSeriesDivideExec: tags=["k"] | -| | MergeScanExec: REDACTED -| | | -+---------------+-----------------------------------------------------------------------------------------------+ ++---------------+-------------------------------------------------+ +| plan_type | plan | ++---------------+-------------------------------------------------+ +| logical_plan | MergeScan [is_placeholder=false] | +| physical_plan | MergeScanExec: REDACTED +| | | ++---------------+-------------------------------------------------+ -- explain verbose at 0s, 5s and 10s. No point at 0s. 
-- SQLNESS REPLACE (-+) - @@ -85,9 +70,7 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test; | logical_plan after expand_wildcard_rule_| SAME TEXT AS ABOVE_| | logical_plan after resolve_grouping_function_| SAME TEXT AS ABOVE_| | logical_plan after type_coercion_| SAME TEXT AS ABOVE_| -| logical_plan after DistPlannerAnalyzer_| PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_| -|_|_PromSeriesDivide: tags=["k"]_| -|_|_Projection: test.i, test.j, test.k_| +| logical_plan after DistPlannerAnalyzer_| Projection: test.i, test.j, test.k_| |_|_MergeScan [is_placeholder=false]_| | analyzed_logical_plan_| SAME TEXT AS ABOVE_| | logical_plan after eliminate_nested_union_| SAME TEXT AS ABOVE_| @@ -114,37 +97,46 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test; | logical_plan after unwrap_cast_in_comparison_| SAME TEXT AS ABOVE_| | logical_plan after common_sub_expression_eliminate_| SAME TEXT AS ABOVE_| | logical_plan after eliminate_group_by_constant_| SAME TEXT AS ABOVE_| +| logical_plan after optimize_projections_| MergeScan [is_placeholder=false]_| +| logical_plan after ScanHintRule_| SAME TEXT AS ABOVE_| +| logical_plan after eliminate_nested_union_| SAME TEXT AS ABOVE_| +| logical_plan after simplify_expressions_| SAME TEXT AS ABOVE_| +| logical_plan after unwrap_cast_in_comparison_| SAME TEXT AS ABOVE_| +| logical_plan after replace_distinct_aggregate_| SAME TEXT AS ABOVE_| +| logical_plan after eliminate_join_| SAME TEXT AS ABOVE_| +| logical_plan after decorrelate_predicate_subquery_| SAME TEXT AS ABOVE_| +| logical_plan after scalar_subquery_to_join_| SAME TEXT AS ABOVE_| +| logical_plan after extract_equijoin_predicate_| SAME TEXT AS ABOVE_| +| logical_plan after eliminate_duplicated_expr_| SAME TEXT AS ABOVE_| +| logical_plan after eliminate_filter_| SAME TEXT AS ABOVE_| +| logical_plan after eliminate_cross_join_| SAME TEXT AS ABOVE_| +| logical_plan after common_sub_expression_eliminate_| SAME TEXT AS ABOVE_| +| logical_plan after eliminate_limit_| SAME TEXT AS ABOVE_| +| logical_plan after propagate_empty_relation_| SAME TEXT AS ABOVE_| +| logical_plan after eliminate_one_union_| SAME TEXT AS ABOVE_| +| logical_plan after filter_null_join_keys_| SAME TEXT AS ABOVE_| +| logical_plan after eliminate_outer_join_| SAME TEXT AS ABOVE_| +| logical_plan after push_down_limit_| SAME TEXT AS ABOVE_| +| logical_plan after push_down_filter_| SAME TEXT AS ABOVE_| +| logical_plan after single_distinct_aggregation_to_group_by | SAME TEXT AS ABOVE_| +| logical_plan after simplify_expressions_| SAME TEXT AS ABOVE_| +| logical_plan after unwrap_cast_in_comparison_| SAME TEXT AS ABOVE_| +| logical_plan after common_sub_expression_eliminate_| SAME TEXT AS ABOVE_| +| logical_plan after eliminate_group_by_constant_| SAME TEXT AS ABOVE_| | logical_plan after optimize_projections_| SAME TEXT AS ABOVE_| | logical_plan after ScanHintRule_| SAME TEXT AS ABOVE_| -| logical_plan_| PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_| -|_|_PromSeriesDivide: tags=["k"]_| -|_|_Projection: test.i, test.j, test.k_| -|_|_MergeScan [is_placeholder=false]_| -| initial_physical_plan_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_| -|_|_PromSeriesDivideExec: tags=["k"]_| -|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k]_| -|_|_MergeScanExec: REDACTED +| logical_plan_| MergeScan [is_placeholder=false]_| +| initial_physical_plan_| MergeScanExec: REDACTED |_|_| -| initial_physical_plan_with_stats_| 
PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j], statistics=[Rows=Inexact(0), Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]] | -|_|_PromSeriesDivideExec: tags=["k"], statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]]_| -|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k], statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]]_| -|_|_MergeScanExec: REDACTED +| initial_physical_plan_with_stats_| MergeScanExec: REDACTED |_|_| -| initial_physical_plan_with_schema_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_| -|_|_PromSeriesDivideExec: tags=["k"], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_| -|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_| -|_|_MergeScanExec: REDACTED +| initial_physical_plan_with_schema_| MergeScanExec: REDACTED |_|_| -| physical_plan after parallelize_scan_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_| -|_|_PromSeriesDivideExec: tags=["k"]_| -|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k]_| -|_|_MergeScanExec: REDACTED +| physical_plan after parallelize_scan_| MergeScanExec: REDACTED |_|_| | physical_plan after PassDistributionRule_| SAME TEXT AS ABOVE_| +| physical_plan after EnforceSorting_| SAME TEXT AS ABOVE_| | physical_plan after OutputRequirements_| OutputRequirementExec_| -|_|_PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_| -|_|_PromSeriesDivideExec: tags=["k"]_| -|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k]_| |_|_MergeScanExec: REDACTED |_|_| | physical_plan after aggregate_statistics_| SAME TEXT AS ABOVE_| @@ -154,15 +146,9 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test; | physical_plan after CombinePartialFinalAggregate_| SAME TEXT AS ABOVE_| | physical_plan after EnforceSorting_| SAME TEXT AS ABOVE_| | physical_plan after OptimizeAggregateOrder_| SAME TEXT AS ABOVE_| -| physical_plan after ProjectionPushdown_| OutputRequirementExec_| -|_|_PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_| -|_|_PromSeriesDivideExec: tags=["k"]_| -|_|_MergeScanExec: REDACTED -|_|_| +| physical_plan after ProjectionPushdown_| SAME TEXT AS ABOVE_| | physical_plan after coalesce_batches_| SAME TEXT AS ABOVE_| -| physical_plan after OutputRequirements_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_| -|_|_PromSeriesDivideExec: tags=["k"]_| -|_|_MergeScanExec: REDACTED +| physical_plan after OutputRequirements_| MergeScanExec: REDACTED |_|_| | physical_plan after LimitAggregation_| SAME TEXT AS ABOVE_| | physical_plan after ProjectionPushdown_| SAME TEXT AS ABOVE_| @@ -171,17 +157,11 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test; | physical_plan after MatchesConstantTerm_| SAME TEXT AS ABOVE_| | physical_plan after RemoveDuplicateRule_| SAME TEXT AS ABOVE_| | physical_plan after SanityCheckPlan_| SAME TEXT AS ABOVE_| -| physical_plan_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_| -|_|_PromSeriesDivideExec: tags=["k"]_| -|_|_MergeScanExec: REDACTED +| physical_plan_| MergeScanExec: REDACTED |_|_| -| physical_plan_with_stats_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j], statistics=[Rows=Inexact(0), 
Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]] | -|_|_PromSeriesDivideExec: tags=["k"], statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]]_| -|_|_MergeScanExec: REDACTED +| physical_plan_with_stats_| MergeScanExec: REDACTED |_|_| -| physical_plan_with_schema_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_| -|_|_PromSeriesDivideExec: tags=["k"], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_| -|_|_MergeScanExec: REDACTED +| physical_plan_with_schema_| MergeScanExec: REDACTED |_|_| +-+-+ diff --git a/tests/cases/standalone/common/tql/partition.result b/tests/cases/standalone/common/tql/partition.result new file mode 100644 index 0000000000..f7dc526a5f --- /dev/null +++ b/tests/cases/standalone/common/tql/partition.result @@ -0,0 +1,160 @@ +-- no partition +create table t ( + i double, + j timestamp time index, + k string primary key +); + +Affected Rows: 0 + +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE (Hash.*) REDACTED +tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100); + ++-+-+-+ +| stage | node | plan_| ++-+-+-+ +| 0_| 0_|_MergeScanExec: REDACTED +|_|_|_| +| 1_| 0_|_ProjectionExec: expr=[k@0 as k, j@1 as j, 100 - avg(prom_irate(j_range,i))@2 * 100 as Float64(100) - avg(prom_irate(j_range,i)) * Float64(100)] REDACTED +|_|_|_RepartitionExec: partitioning=REDACTED +|_|_|_SortPreservingMergeExec: [k@0 ASC NULLS LAST, j@1 ASC NULLS LAST] REDACTED +|_|_|_SortExec: expr=[k@0 ASC NULLS LAST, j@1 ASC NULLS LAST], preserve_partitioning=[true] REDACTED +|_|_|_AggregateExec: mode=FinalPartitioned, gby=[k@0 as k, j@1 as j], aggr=[avg(prom_irate(j_range,i))] REDACTED +|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED +|_|_|_RepartitionExec: partitioning=REDACTED +|_|_|_AggregateExec: mode=Partial, gby=[k@2 as k, j@0 as j], aggr=[avg(prom_irate(j_range,i))] REDACTED +|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED +|_|_|_FilterExec: prom_irate(j_range,i)@1 IS NOT NULL REDACTED +|_|_|_ProjectionExec: expr=[j@1 as j, prom_irate(j_range@3, i@0) as prom_irate(j_range,i), k@2 as k] REDACTED +|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[1000], eval range=[60000], time index=[j] REDACTED +|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +|_|_|_| +|_|_| Total rows: 0_| ++-+-+-+ + +drop table t; + +Affected Rows: 0 + +-- partition on tag +create table t ( + i double, + j timestamp time index, + k string, + l string, + primary key (k, l) +) partition on columns (k, l) (k < 'a', k >= 'a'); + +Affected Rows: 0 + +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE (Hash.*) REDACTED +tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100); + ++-+-+-+ +| stage | node | plan_| ++-+-+-+ +| 0_| 0_|_ProjectionExec: expr=[k@0 as k, j@1 as j, 100 - avg(prom_irate(j_range,i))@2 * 
100 as Float64(100) - avg(prom_irate(j_range,i)) * Float64(100)] REDACTED +|_|_|_RepartitionExec: partitioning=REDACTED +|_|_|_SortPreservingMergeExec: [k@0 ASC NULLS LAST, j@1 ASC NULLS LAST] REDACTED +|_|_|_SortExec: expr=[k@0 ASC NULLS LAST, j@1 ASC NULLS LAST], preserve_partitioning=[true] REDACTED +|_|_|_AggregateExec: mode=FinalPartitioned, gby=[k@0 as k, j@1 as j], aggr=[avg(prom_irate(j_range,i))], ordering_mode=PartiallySorted([0]) REDACTED +|_|_|_SortExec: expr=[k@0 ASC NULLS LAST], preserve_partitioning=[true] REDACTED +|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED +|_|_|_RepartitionExec: partitioning=REDACTED +|_|_|_AggregateExec: mode=Partial, gby=[k@2 as k, j@0 as j], aggr=[avg(prom_irate(j_range,i))], ordering_mode=PartiallySorted([0]) REDACTED +|_|_|_ProjectionExec: expr=[j@0 as j, prom_irate(j_range,i)@1 as prom_irate(j_range,i), k@2 as k] REDACTED +|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC, j@0 ASC], preserve_partitioning=[true] REDACTED +|_|_|_MergeScanExec: REDACTED +|_|_|_| +| 1_| 0_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED +|_|_|_FilterExec: prom_irate(j_range,i)@1 IS NOT NULL REDACTED +|_|_|_ProjectionExec: expr=[j@1 as j, prom_irate(j_range@4, i@0) as prom_irate(j_range,i), k@2 as k, l@3 as l] REDACTED +|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[1000], eval range=[60000], time index=[j] REDACTED +|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +|_|_|_| +| 1_| 1_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED +|_|_|_FilterExec: prom_irate(j_range,i)@1 IS NOT NULL REDACTED +|_|_|_ProjectionExec: expr=[j@1 as j, prom_irate(j_range@4, i@0) as prom_irate(j_range,i), k@2 as k, l@3 as l] REDACTED +|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[1000], eval range=[60000], time index=[j] REDACTED +|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +|_|_|_| +|_|_| Total rows: 0_| ++-+-+-+ + +drop table t; + +Affected Rows: 0 + +-- partition on value +create table t ( + i double, + j timestamp time index, + k string, + l string, + primary key (k, l) +) partition on columns (i) (i < 1.0, i >= 1.0); + +Affected Rows: 0 + +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE (Hash.*) REDACTED +tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100); + ++-+-+-+ +| stage | node | plan_| ++-+-+-+ +| 0_| 0_|_ProjectionExec: expr=[k@0 as k, j@1 as j, 100 - avg(prom_irate(j_range,i))@2 * 100 as Float64(100) - avg(prom_irate(j_range,i)) * Float64(100)] REDACTED +|_|_|_RepartitionExec: partitioning=REDACTED +|_|_|_SortPreservingMergeExec: [k@0 ASC NULLS LAST, j@1 ASC NULLS LAST] REDACTED +|_|_|_SortExec: expr=[k@0 ASC NULLS LAST, j@1 ASC NULLS LAST], preserve_partitioning=[true] REDACTED +|_|_|_AggregateExec: mode=FinalPartitioned, gby=[k@0 as k, j@1 as j], aggr=[avg(prom_irate(j_range,i))] REDACTED +|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED +|_|_|_RepartitionExec: 
partitioning=REDACTED +|_|_|_AggregateExec: mode=Partial, gby=[k@2 as k, j@0 as j], aggr=[avg(prom_irate(j_range,i))] REDACTED +|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED +|_|_|_FilterExec: prom_irate(j_range,i)@1 IS NOT NULL REDACTED +|_|_|_ProjectionExec: expr=[j@1 as j, prom_irate(j_range@4, i@0) as prom_irate(j_range,i), k@2 as k] REDACTED +|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[1000], eval range=[60000], time index=[j] REDACTED +|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED +|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED +|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED +|_|_|_RepartitionExec: partitioning=REDACTED +|_|_|_MergeScanExec: REDACTED +|_|_|_| +| 1_| 0_|_SortPreservingMergeExec: [k@2 ASC, l@3 ASC, j@1 ASC] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +|_|_|_| +| 1_| 1_|_SortPreservingMergeExec: [k@2 ASC, l@3 ASC, j@1 ASC] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +|_|_|_| +|_|_| Total rows: 0_| ++-+-+-+ + +drop table t; + +Affected Rows: 0 + diff --git a/tests/cases/standalone/common/tql/partition.sql b/tests/cases/standalone/common/tql/partition.sql new file mode 100644 index 0000000000..332caa4252 --- /dev/null +++ b/tests/cases/standalone/common/tql/partition.sql @@ -0,0 +1,57 @@ +-- no partition +create table t ( + i double, + j timestamp time index, + k string primary key +); + +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE (Hash.*) REDACTED +tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100); + +drop table t; + +-- partition on tag +create table t ( + i double, + j timestamp time index, + k string, + l string, + primary key (k, l) +) partition on columns (k, l) (k < 'a', k >= 'a'); + +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE (Hash.*) REDACTED +tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100); + +drop table t; + +-- partition on value +create table t ( + i double, + j timestamp time index, + k string, + l string, + primary key (k, l) +) partition on columns (i) (i < 1.0, i >= 1.0); + +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE (Hash.*) REDACTED +tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100); + +drop table t; diff --git a/tests/cases/standalone/optimizer/last_value.result b/tests/cases/standalone/optimizer/last_value.result index fe1bf50305..e6b6d0b047 100644 --- a/tests/cases/standalone/optimizer/last_value.result +++ b/tests/cases/standalone/optimizer/last_value.result @@ -46,7 +46,6 @@ explain analyze |_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED |_|_|_RepartitionExec: REDACTED |_|_|_AggregateExec: 
mode=Partial, gby=[host@1 as host], aggr=[last_value(t.host) ORDER BY [t.ts ASC NULLS LAST], last_value(t.not_pk) ORDER BY [t.ts ASC NULLS LAST], last_value(t.val) ORDER BY [t.ts ASC NULLS LAST]] REDACTED -|_|_|_RepartitionExec: REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), selector=LastRow REDACTED |_|_|_| |_|_| Total rows: 4_| diff --git a/tests/cases/standalone/optimizer/order_by.result b/tests/cases/standalone/optimizer/order_by.result index 51f19153e1..6bfd107bb6 100644 --- a/tests/cases/standalone/optimizer/order_by.result +++ b/tests/cases/standalone/optimizer/order_by.result @@ -91,6 +91,7 @@ SELECT i, t AS alias_ts FROM test_pk ORDER BY alias_ts DESC LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY t DESC LIMIT 5; +-+-+-+ @@ -99,9 +100,9 @@ EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY t DESC LIMIT 5; | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_ProjectionExec: expr=[i@0 as i, alias_ts@1 as alias_ts] REDACTED -|_|_|_SortPreservingMergeExec: [t@2 DESC] REDACTED -|_|_|_WindowedSortExec: expr=t@2 DESC num_ranges=1 fetch=5 REDACTED -|_|_|_PartSortExec: expr=t@2 DESC num_ranges=1 limit=5 REDACTED +|_|_|_SortPreservingMergeExec: [t@2 DESC], fetch=5 REDACTED +|_|_|_WindowedSortExec: expr=t@2 DESC num_ranges=REDACTED fetch=5 REDACTED +|_|_|_PartSortExec: expr=t@2 DESC num_ranges=REDACTED limit=5 REDACTED |_|_|_ProjectionExec: expr=[i@0 as i, t@1 as alias_ts, t@1 as t] REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| @@ -113,6 +114,7 @@ EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY t DESC LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY alias_ts DESC LIMIT 5; +-+-+-+ @@ -120,9 +122,9 @@ EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY alias_ts DESC LIMI +-+-+-+ | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| -| 1_| 0_|_SortPreservingMergeExec: [alias_ts@1 DESC] REDACTED -|_|_|_WindowedSortExec: expr=alias_ts@1 DESC num_ranges=1 fetch=5 REDACTED -|_|_|_PartSortExec: expr=alias_ts@1 DESC num_ranges=1 limit=5 REDACTED +| 1_| 0_|_SortPreservingMergeExec: [alias_ts@1 DESC], fetch=5 REDACTED +|_|_|_WindowedSortExec: expr=alias_ts@1 DESC num_ranges=REDACTED fetch=5 REDACTED +|_|_|_PartSortExec: expr=alias_ts@1 DESC num_ranges=REDACTED limit=5 REDACTED |_|_|_ProjectionExec: expr=[i@0 as i, t@1 as alias_ts] REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| diff --git a/tests/cases/standalone/optimizer/order_by.sql b/tests/cases/standalone/optimizer/order_by.sql index 18802eeda8..f8cb7f6c9d 100644 --- a/tests/cases/standalone/optimizer/order_by.sql +++ b/tests/cases/standalone/optimizer/order_by.sql @@ -23,6 +23,7 @@ SELECT i, t AS alias_ts FROM test_pk ORDER BY alias_ts DESC LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY t DESC LIMIT 5; -- 
SQLNESS REPLACE (-+) - @@ -30,6 +31,7 @@ EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY t DESC LIMIT 5; -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT i, t AS alias_ts FROM test_pk ORDER BY alias_ts DESC LIMIT 5; DROP TABLE test_pk; diff --git a/tests/cases/standalone/optimizer/windowed_sort.result b/tests/cases/standalone/optimizer/windowed_sort.result index f28738a94d..71343655d0 100644 --- a/tests/cases/standalone/optimizer/windowed_sort.result +++ b/tests/cases/standalone/optimizer/windowed_sort.result @@ -69,7 +69,8 @@ ORDER BY +-+-+-+ | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| -| 1_| 0_|_SortExec: expr=[collect_time@0 ASC NULLS LAST], preserve_partitioning=[false] REDACTED +| 1_| 0_|_SortPreservingMergeExec: [collect_time@0 ASC NULLS LAST] REDACTED +|_|_|_SortExec: expr=[collect_time@0 ASC NULLS LAST], preserve_partitioning=[true] REDACTED |_|_|_ProjectionExec: expr=[collect_time_utc@0 as collect_time, peak_current@1 as peak_current] REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| @@ -116,7 +117,8 @@ ORDER BY +-+-+-+ | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| -| 1_| 0_|_SortExec: expr=[collect_time_0@0 ASC NULLS LAST], preserve_partitioning=[false] REDACTED +| 1_| 0_|_SortPreservingMergeExec: [collect_time_0@0 ASC NULLS LAST] REDACTED +|_|_|_SortExec: expr=[collect_time_0@0 ASC NULLS LAST], preserve_partitioning=[true] REDACTED |_|_|_ProjectionExec: expr=[collect_time_utc@0 as collect_time_0, peak_current@1 as peak_current] REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| @@ -151,6 +153,7 @@ ORDER BY -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT collect_time AS true_collect_time, collect_time_utc AS collect_time, @@ -166,8 +169,8 @@ ORDER BY | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_SortPreservingMergeExec: [true_collect_time@0 DESC] REDACTED -|_|_|_WindowedSortExec: expr=true_collect_time@0 DESC num_ranges=1 REDACTED -|_|_|_PartSortExec: expr=true_collect_time@0 DESC num_ranges=1 REDACTED +|_|_|_WindowedSortExec: expr=true_collect_time@0 DESC num_ranges=REDACTED REDACTED +|_|_|_PartSortExec: expr=true_collect_time@0 DESC num_ranges=REDACTED REDACTED |_|_|_ProjectionExec: expr=[collect_time@0 as true_collect_time, collect_time_utc@1 as collect_time, peak_current@2 as peak_current] REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| @@ -202,6 +205,7 @@ ORDER BY -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT collect_time_utc AS collect_time, collect_time AS true_collect_time, @@ -217,8 +221,8 @@ ORDER BY | 0_| 0_|_MergeScanExec: REDACTED |_|_|_| | 1_| 0_|_SortPreservingMergeExec: [true_collect_time@1 DESC] REDACTED -|_|_|_WindowedSortExec: expr=true_collect_time@1 DESC num_ranges=1 REDACTED -|_|_|_PartSortExec: expr=true_collect_time@1 DESC num_ranges=1 REDACTED +|_|_|_WindowedSortExec: expr=true_collect_time@1 DESC num_ranges=REDACTED REDACTED +|_|_|_PartSortExec: 
expr=true_collect_time@1 DESC num_ranges=REDACTED REDACTED |_|_|_ProjectionExec: expr=[collect_time_utc@1 as collect_time, collect_time@0 as true_collect_time, peak_current@2 as peak_current] REDACTED |_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges) REDACTED |_|_|_| diff --git a/tests/cases/standalone/optimizer/windowed_sort.sql b/tests/cases/standalone/optimizer/windowed_sort.sql index 72d3464af7..7573b71b91 100644 --- a/tests/cases/standalone/optimizer/windowed_sort.sql +++ b/tests/cases/standalone/optimizer/windowed_sort.sql @@ -84,6 +84,7 @@ ORDER BY -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT collect_time AS true_collect_time, collect_time_utc AS collect_time, @@ -108,6 +109,7 @@ ORDER BY -- SQLNESS REPLACE (peers.*) REDACTED -- SQLNESS REPLACE (metrics.*) REDACTED -- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +-- SQLNESS REPLACE num_ranges=\d+ num_ranges=REDACTED EXPLAIN ANALYZE SELECT collect_time_utc AS collect_time, collect_time AS true_collect_time, diff --git a/tests/cases/standalone/common/tql-explain-analyze/analyze.result b/tests/cases/standalone/tql-explain-analyze/analyze.result similarity index 61% rename from tests/cases/standalone/common/tql-explain-analyze/analyze.result rename to tests/cases/standalone/tql-explain-analyze/analyze.result index 63491df951..47d5810748 100644 --- a/tests/cases/standalone/common/tql-explain-analyze/analyze.result +++ b/tests/cases/standalone/tql-explain-analyze/analyze.result @@ -19,11 +19,11 @@ TQL ANALYZE (0, 10, '5s') test; +-+-+-+ | stage | node | plan_| +-+-+-+ -| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED -|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED -|_|_|_MergeScanExec: REDACTED +| 0_| 0_|_MergeScanExec: REDACTED |_|_|_| -| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED |_|_|_| |_|_| Total rows: 4_| +-+-+-+ @@ -41,11 +41,11 @@ TQL ANALYZE (0, 10, '1s', '2s') test; +-+-+-+ | stage | node | plan_| +-+-+-+ -| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[2000], interval=[1000], time index=[j] REDACTED -|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED -|_|_|_MergeScanExec: REDACTED +| 0_| 0_|_MergeScanExec: REDACTED |_|_|_| -| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[2000], interval=[1000], time index=[j] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED |_|_|_| |_|_| Total rows: 4_| +-+-+-+ @@ -62,11 +62,11 @@ TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp +-+-+-+ | stage | node | plan_| +-+-+-+ -| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED -|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED 
-|_|_|_MergeScanExec: REDACTED +| 0_| 0_|_MergeScanExec: REDACTED |_|_|_| -| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED |_|_|_| |_|_| Total rows: 4_| +-+-+-+ @@ -85,11 +85,11 @@ TQL ANALYZE VERBOSE (0, 10, '5s') test; +-+-+-+ | stage | node | plan_| +-+-+-+ -| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED -|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED -|_|_|_MergeScanExec: REDACTED +| 0_| 0_|_MergeScanExec: REDACTED |_|_|_| -| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries, projection=["i", "j", "k"], filters=[j >= TimestampMillisecond(-300000, None), j <= TimestampMillisecond(310000, None)], REDACTED +| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries, projection=["i", "j", "k"], filters=[j >= TimestampMillisecond(-300000, None), j <= TimestampMillisecond(310000, None)], REDACTED |_|_|_| |_|_| Total rows: 4_| +-+-+-+ @@ -114,13 +114,17 @@ TQL ANALYZE (0, 10, '5s') test; +-+-+-+ | stage | node | plan_| +-+-+-+ -| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED -|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED +| 0_| 0_|_SortPreservingMergeExec: [k@2 ASC, l@3 ASC, j@1 ASC] REDACTED +|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC, j@1 ASC], preserve_partitioning=[true] REDACTED |_|_|_MergeScanExec: REDACTED |_|_|_| -| 1_| 0_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED |_|_|_| -| 1_| 1_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +| 1_| 1_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED |_|_|_| |_|_| Total rows: 0_| +-+-+-+ @@ -136,17 +140,25 @@ TQL ANALYZE (0, 10, '5s') rate(test[10s]); +-+-+-+ | stage | node | plan_| +-+-+-+ -| 0_| 0_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED +| 0_| 0_|_SortPreservingMergeExec: [k@2 ASC, l@3 ASC, j@0 ASC] REDACTED +|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC, j@0 ASC], preserve_partitioning=[true] REDACTED +|_|_|_MergeScanExec: REDACTED +|_|_|_| +| 1_| 0_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED |_|_|_FilterExec: prom_rate(j_range,i,j,Int64(10000))@1 IS NOT NULL REDACTED |_|_|_ProjectionExec: expr=[j@1 as j, prom_rate(j_range@4, i@0, j@1, 10000) as prom_rate(j_range,i,j,Int64(10000)), k@2 as k, l@3 as l] REDACTED 
|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[5000], eval range=[10000], time index=[j] REDACTED |_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED |_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED -|_|_|_MergeScanExec: REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED |_|_|_| -| 1_| 0_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED -|_|_|_| -| 1_| 1_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED +| 1_| 1_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED +|_|_|_FilterExec: prom_rate(j_range,i,j,Int64(10000))@1 IS NOT NULL REDACTED +|_|_|_ProjectionExec: expr=[j@1 as j, prom_rate(j_range@4, i@0, j@1, 10000) as prom_rate(j_range,i,j,Int64(10000)), k@2 as k, l@3 as l] REDACTED +|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[5000], eval range=[10000], time index=[j] REDACTED +|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED +|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED +|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED |_|_|_| |_|_| Total rows: 0_| +-+-+-+ diff --git a/tests/cases/standalone/tql-explain-analyze/analyze.sql b/tests/cases/standalone/tql-explain-analyze/analyze.sql new file mode 100644 index 0000000000..99de9a0a9d --- /dev/null +++ b/tests/cases/standalone/tql-explain-analyze/analyze.sql @@ -0,0 +1,66 @@ +CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY); + +-- insert two points at 1ms and one point at 2ms +INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a"); + +-- analyze at 0s, 5s and 10s. No point at 0s. +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +TQL ANALYZE (0, 10, '5s') test; + +-- 'lookback' parameter is not fully supported, the test has to be updated +-- analyze at 0s, 5s and 10s. No point at 0s. +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +TQL ANALYZE (0, 10, '1s', '2s') test; + +-- analyze at 0s, 5s and 10s. No point at 0s. +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp + '10 seconds'::interval, '5s') test; + +-- analyze verbose at 0s, 5s and 10s. No point at 0s. 
+-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (elapsed_compute.*) REDACTED +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (Duration.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +TQL ANALYZE VERBOSE (0, 10, '5s') test; + +DROP TABLE test; + +-- partition table +CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING, l STRING, PRIMARY KEY(k, l)) PARTITION ON COLUMNS (k) (k < 'a', k >= 'a'); + +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +TQL ANALYZE (0, 10, '5s') test; + +-- SQLNESS REPLACE (metrics.*) REDACTED +-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED +-- SQLNESS REPLACE (-+) - +-- SQLNESS REPLACE (\s\s+) _ +-- SQLNESS REPLACE (peers.*) REDACTED +-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED +TQL ANALYZE (0, 10, '5s') rate(test[10s]); + +drop table test;