mirror of
https://github.com/GreptimeTeam/greptimedb.git
synced 2026-05-16 21:10:38 +00:00
chore: upgrade DataFusion family, again (#7578)
* chore: upgrade DataFusion family

Signed-off-by: luofucong <luofc@foxmail.com>

* chore: switch to released version of datafusion-pg-catalog

---------

Signed-off-by: luofucong <luofc@foxmail.com>
Co-authored-by: Ning Sun <sunning@greptime.com>
Co-authored-by: Ning Sun <sunng@protonmail.com>
@@ -10,7 +10,7 @@ workspace = true

 [dependencies]
 arrow.workspace = true
 arrow-schema.workspace = true
-async-compression = { version = "0.3", features = [
+async-compression = { version = "0.4", features = [
     "bzip2",
     "gzip",
     "xz",
@@ -203,8 +203,8 @@ pub enum Error {
         error: parquet::errors::ParquetError,
     },

-    #[snafu(display("Failed to build file stream"))]
-    BuildFileStream {
+    #[snafu(transparent)]
+    DataFusion {
         #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
@@ -241,8 +241,9 @@ impl ErrorExt for Error {
            | ReadRecordBatch { .. }
            | WriteRecordBatch { .. }
            | EncodeRecordBatch { .. }
-           | BuildFileStream { .. }
            | OrcReader { .. } => StatusCode::Unexpected,

+           DataFusion { .. } => StatusCode::Internal,
+
         }
     }
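The pattern above recurs throughout this upgrade: a bespoke wrapper variant (`BuildFileStream` here, `ExtractColumnFromFilter` in a later crate) is replaced by a single `#[snafu(transparent)] DataFusion` variant, so a `DataFusionError` can be propagated with a plain `?` instead of a `.context(...)` call at every site. A minimal sketch of the idea, trimmed to the one variant (the `StatusCode` mapping is handled separately, as in the hunk above):

```rust
use datafusion_common::DataFusionError;
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
pub enum Error {
    // `transparent` forwards Display and source() to the inner error and
    // derives `From<DataFusionError>`, which is what makes bare `?` work.
    #[snafu(transparent)]
    DataFusion {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: DataFusionError,
    },
}

fn forward(res: Result<u32, DataFusionError>) -> Result<u32, Error> {
    // No `.context(...)` needed anymore; conversion is automatic.
    Ok(res?)
}
```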
@@ -313,16 +313,13 @@ pub async fn file_to_stream(
        filename.to_string(),
        0,
    )]))
-   .with_projection_indices(projection)
+   .with_projection_indices(projection)?
    .with_file_compression_type(df_compression)
    .build();

    let store = Arc::new(OpendalStore::new(store.clone()));
-   let file_opener = file_source
-       .with_projection(&config)
-       .create_file_opener(store, &config, 0);
-   let stream = FileStream::new(&config, 0, file_opener, &ExecutionPlanMetricsSet::new())
-       .context(error::BuildFileStreamSnafu)?;
+   let file_opener = config.file_source().create_file_opener(store, &config, 0)?;
+   let stream = FileStream::new(&config, 0, file_opener, &ExecutionPlanMetricsSet::new())?;

    Ok(Box::pin(stream))
}
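Two API shifts drive this hunk: `with_projection_indices` on the scan-config builder is now fallible, and the `FileOpener` is obtained back from the *built* config rather than from the raw file source, also fallibly. A condensed sketch of the new flow (imports and setup omitted; `url`, `file_source`, `projection`, `df_compression`, and `store` are the surrounding function's locals). The bare `?`s land in the transparent `DataFusion` error variant introduced above:

```rust
let config = FileScanConfigBuilder::new(url, file_source)
    // Now returns a Result: invalid projection indices surface here.
    .with_projection_indices(projection)?
    .with_file_compression_type(df_compression)
    .build();

// Ask the built config for its file source, then build the opener;
// opener construction and stream construction both propagate with `?`.
let file_opener = config.file_source().create_file_opener(store, &config, 0)?;
let stream = FileStream::new(&config, 0, file_opener, &ExecutionPlanMetricsSet::new())?;
```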
@@ -46,7 +46,10 @@ struct Test<'a> {
 impl Test<'_> {
     async fn run(self, store: &ObjectStore) {
         let store = Arc::new(object_store_opendal::OpendalStore::new(store.clone()));
-        let file_opener = self.file_source.create_file_opener(store, &self.config, 0);
+        let file_opener = self
+            .file_source
+            .create_file_opener(store, &self.config, 0)
+            .unwrap();

         let result = FileStream::new(
             &self.config,
@@ -104,6 +104,7 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
         schema.clone(),
         FileCompressionType::UNCOMPRESSED,
         Arc::new(object_store_opendal::OpendalStore::new(store.clone())),
+        true,
     );

     let size = store.read(origin_path).await.unwrap().len();
@@ -154,11 +155,13 @@ pub async fn setup_stream_to_csv_test(
     let config = scan_config(None, origin_path, csv_source.clone());
     let size = store.read(origin_path).await.unwrap().len();

-    let csv_opener = csv_source.create_file_opener(
-        Arc::new(object_store_opendal::OpendalStore::new(store.clone())),
-        &config,
-        0,
-    );
+    let csv_opener = csv_source
+        .create_file_opener(
+            Arc::new(object_store_opendal::OpendalStore::new(store.clone())),
+            &config,
+            0,
+        )
+        .unwrap();
     let stream = FileStream::new(&config, 0, csv_opener, &ExecutionPlanMetricsSet::new()).unwrap();

     let (tmp_store, dir) = test_tmp_store("test_stream_to_csv");
@@ -61,18 +61,18 @@ use crate::scalars::uddsketch_calc::UddSketchCalcFunction;
 pub struct MockInputExec {
     input: Vec<RecordBatch>,
     schema: SchemaRef,
-    properties: PlanProperties,
+    properties: Arc<PlanProperties>,
 }

 impl MockInputExec {
     pub fn new(input: Vec<RecordBatch>, schema: SchemaRef) -> Self {
         Self {
-            properties: PlanProperties::new(
+            properties: Arc::new(PlanProperties::new(
                 EquivalenceProperties::new(schema.clone()),
                 Partitioning::UnknownPartitioning(1),
                 EmissionType::Incremental,
                 Boundedness::Bounded,
-            ),
+            )),
             input,
             schema,
         }
@@ -94,7 +94,7 @@ impl ExecutionPlan for MockInputExec {
         self
     }

-    fn properties(&self) -> &PlanProperties {
+    fn properties(&self) -> &Arc<PlanProperties> {
         &self.properties
     }

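The bulk of this diff is one mechanical migration: `ExecutionPlan::properties` now returns `&Arc<PlanProperties>`, so every operator stores its properties behind an `Arc` and every constructor gains an `Arc::new(...)` wrapper. The shape of the change, reduced to a free-standing sketch (a real node implements the full `ExecutionPlan` trait, which is omitted here):

```rust
use std::sync::Arc;

use datafusion::physical_plan::PlanProperties;

struct MyExec {
    // Previously `properties: PlanProperties`.
    properties: Arc<PlanProperties>,
}

impl MyExec {
    fn new(props: PlanProperties) -> Self {
        // Wrap once at construction; clones of the node share the Arc.
        Self { properties: Arc::new(props) }
    }

    // Mirrors the new trait signature used throughout this diff. Nodes
    // that delegate (e.g. `self.input.properties()`) keep working because
    // the child now returns `&Arc<PlanProperties>` too.
    fn properties(&self) -> &Arc<PlanProperties> {
        &self.properties
    }
}
```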
@@ -428,7 +428,7 @@ impl Accumulator for CountHashAccumulator {
            &self.random_state,
            &mut self.batch_hashes,
        )?;
-       for hash in hashes.as_slice() {
+       for hash in hashes {
            self.values.insert(*hash);
        }
        Ok(())
@@ -48,7 +48,7 @@ pub fn rename_logical_plan_columns(
        plan.schema().qualified_field_from_column(&old_column)?;

    for (qualifier, field) in plan.schema().iter() {
-       if qualifier.eq(&qualifier_rename) && field.as_ref() == field_rename {
+       if qualifier.eq(&qualifier_rename) && field == field_rename {
            projection.push(col(Column::from((qualifier, field))).alias(new_name));
        }
    }
@@ -32,7 +32,7 @@ pub struct StreamScanAdapter {
     stream: Mutex<Option<SendableRecordBatchStream>>,
     schema: SchemaRef,
     arrow_schema: ArrowSchemaRef,
-    properties: PlanProperties,
+    properties: Arc<PlanProperties>,
     output_ordering: Option<Vec<PhysicalSortExpr>>,
 }

@@ -49,12 +49,12 @@ impl StreamScanAdapter {
     pub fn new(stream: SendableRecordBatchStream) -> Self {
         let schema = stream.schema();
         let arrow_schema = schema.arrow_schema().clone();
-        let properties = PlanProperties::new(
+        let properties = Arc::new(PlanProperties::new(
             EquivalenceProperties::new(arrow_schema.clone()),
             Partitioning::UnknownPartitioning(1),
             EmissionType::Incremental,
             Boundedness::Bounded,
-        );
+        ));

         Self {
             stream: Mutex::new(Some(stream)),
@@ -91,7 +91,7 @@ impl ExecutionPlan for StreamScanAdapter {
         self.arrow_schema.clone()
     }

-    fn properties(&self) -> &PlanProperties {
+    fn properties(&self) -> &Arc<PlanProperties> {
         &self.properties
     }

@@ -1229,7 +1229,8 @@ impl TryFrom<ScalarValue> for Value {
            | ScalarValue::Utf8View(_)
            | ScalarValue::BinaryView(_)
            | ScalarValue::Map(_)
-           | ScalarValue::Date64(_) => {
+           | ScalarValue::Date64(_)
+           | ScalarValue::RunEndEncoded(_, _, _) => {
                return error::UnsupportedArrowTypeSnafu {
                    arrow_type: v.data_type(),
                }
@@ -256,7 +256,8 @@ impl Helper {
            | ScalarValue::Utf8View(_)
            | ScalarValue::BinaryView(_)
            | ScalarValue::Map(_)
-           | ScalarValue::Date64(_) => {
+           | ScalarValue::Date64(_)
+           | ScalarValue::RunEndEncoded(_, _, _) => {
                return error::ConversionSnafu {
                    from: format!("Unsupported scalar value: {value}"),
                }
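`ScalarValue` gained a `RunEndEncoded` variant in this DataFusion release, so every exhaustive `match` over scalar values needs a new arm; in both hunks above it simply joins the existing "unsupported" arms. A standalone sketch of the idiom, with a plain error string standing in for the two snafu error builders:

```rust
use datafusion_common::ScalarValue;

fn classify(v: &ScalarValue) -> Result<(), String> {
    match v {
        ScalarValue::Utf8View(_)
        | ScalarValue::BinaryView(_)
        | ScalarValue::Map(_)
        | ScalarValue::Date64(_)
        // New in this release: without this arm, an exhaustive match
        // over ScalarValue no longer compiles after the upgrade.
        | ScalarValue::RunEndEncoded(_, _, _) => {
            Err(format!("unsupported scalar value: {v}"))
        }
        _ => Ok(()),
    }
}
```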
@@ -178,8 +178,8 @@ pub enum Error {
        location: Location,
    },

-   #[snafu(display("Failed to extract column from filter"))]
-   ExtractColumnFromFilter {
+   #[snafu(transparent)]
+   DataFusion {
        #[snafu(source)]
        error: DataFusionError,
        #[snafu(implicit)]
@@ -225,8 +225,9 @@ impl ErrorExt for Error {
            | ManifestExists { .. }
            | BuildStream { .. }
            | ParquetScanPlan { .. }
-           | UnexpectedEngine { .. }
-           | ExtractColumnFromFilter { .. } => StatusCode::Unexpected,
+           | UnexpectedEngine { .. } => StatusCode::Unexpected,

+           DataFusion { .. } => StatusCode::Internal,
+
        }
    }
@@ -37,10 +37,7 @@ use snafu::{GenerateImplicitData, ResultExt, ensure};
 use store_api::storage::ScanRequest;

 use self::file_stream::ScanPlanConfig;
-use crate::error::{
-    BuildBackendSnafu, ExtractColumnFromFilterSnafu, ProjectSchemaSnafu,
-    ProjectionOutOfBoundsSnafu, Result,
-};
+use crate::error::{BuildBackendSnafu, ProjectSchemaSnafu, ProjectionOutOfBoundsSnafu, Result};
 use crate::region::FileRegion;

 impl FileRegion {
@@ -126,8 +123,7 @@ impl FileRegion {

        let mut aux_column_set = HashSet::new();
        for scan_filter in scan_filters {
-           df_logical_expr_utils::expr_to_columns(scan_filter, &mut aux_column_set)
-               .context(ExtractColumnFromFilterSnafu)?;
+           df_logical_expr_utils::expr_to_columns(scan_filter, &mut aux_column_set)?;
        }

        let all_file_columns = aux_column_set
            .iter()
@@ -56,7 +56,7 @@ fn build_record_batch_stream(

    let config =
        FileScanConfigBuilder::new(ObjectStoreUrl::local_filesystem(), file_source.clone())
-           .with_projection_indices(scan_plan_config.projection.cloned())
+           .with_projection_indices(scan_plan_config.projection.cloned())?
            .with_limit(limit)
            .with_file_group(FileGroup::new(files))
            .build();
@@ -65,7 +65,7 @@ fn build_record_batch_stream(
        scan_plan_config.store.clone(),
    ));

-   let file_opener = file_source.create_file_opener(store, &config, 0);
+   let file_opener = config.file_source().create_file_opener(store, &config, 0)?;
    let stream = FileStream::new(
        &config,
        0, // partition: hard-code
@@ -146,7 +146,7 @@ fn new_parquet_stream_with_exec_plan(
    let file_scan_config =
        FileScanConfigBuilder::new(ObjectStoreUrl::local_filesystem(), Arc::new(parquet_source))
            .with_file_group(file_group)
-           .with_projection_indices(projection.cloned())
+           .with_projection_indices(projection.cloned())?
            .with_limit(*limit)
            .build();

@@ -21,7 +21,7 @@ use datatypes::arrow::error::ArrowError;
 use parquet::arrow::arrow_reader::{ParquetRecordBatchReader, RowGroups, RowSelection};
 use parquet::arrow::{FieldLevels, ProjectionMask, parquet_to_arrow_field_levels};
 use parquet::column::page::{PageIterator, PageReader};
-use parquet::file::metadata::ParquetMetaData;
+use parquet::file::metadata::{ParquetMetaData, RowGroupMetaData};
 use snafu::ResultExt;

 use crate::error;
@@ -103,6 +103,14 @@ impl RowGroups for MemtableRowGroupPageFetcher<'_> {
            reader: Some(self.column_page_reader(i)),
        }))
    }

+   fn row_groups(&self) -> Box<dyn Iterator<Item = &RowGroupMetaData> + '_> {
+       Box::new(std::iter::once(self.base.row_group_metadata()))
+   }
+
+   fn metadata(&self) -> &ParquetMetaData {
+       self.base.parquet_metadata()
+   }
 }

 impl RowGroupReaderContext for BulkIterContextRef {
@@ -205,7 +205,8 @@ impl ParquetFetchMetrics {
 }

 pub(crate) struct RowGroupBase<'a> {
-    metadata: &'a RowGroupMetaData,
+    parquet_metadata: &'a ParquetMetaData,
+    row_group_idx: usize,
     pub(crate) offset_index: Option<&'a [OffsetIndexMetaData]>,
     /// Compressed page of each column.
     column_chunks: Vec<Option<Arc<ColumnChunkData>>>,
@@ -225,7 +226,8 @@ impl<'a> RowGroupBase<'a> {
            .map(|x| x[row_group_idx].as_slice());

        Self {
-           metadata,
+           parquet_metadata: parquet_meta,
+           row_group_idx,
            offset_index,
            column_chunks: vec![None; metadata.columns().len()],
            row_count: metadata.num_rows() as usize,
@@ -244,7 +246,7 @@ impl<'a> RowGroupBase<'a> {
        let ranges = self
            .column_chunks
            .iter()
-           .zip(self.metadata.columns())
+           .zip(self.row_group_metadata().columns())
            .enumerate()
            .filter(|&(idx, (chunk, _chunk_meta))| chunk.is_none() && projection.leaf_included(idx))
            .flat_map(|(idx, (_chunk, chunk_meta))| {
@@ -293,8 +295,12 @@ impl<'a> RowGroupBase<'a> {
                chunks.push(chunk_data.next().unwrap());
            }

+           let column = self
+               .parquet_metadata
+               .row_group(self.row_group_idx)
+               .column(idx);
            *chunk = Some(Arc::new(ColumnChunkData::Sparse {
-               length: self.metadata.column(idx).byte_range().1 as usize,
+               length: column.byte_range().1 as usize,
                data: offsets.into_iter().zip(chunks).collect(),
            }))
        }
@@ -307,7 +313,7 @@ impl<'a> RowGroupBase<'a> {
            .enumerate()
            .filter(|&(idx, chunk)| chunk.is_none() && projection.leaf_included(idx))
            .map(|(idx, _chunk)| {
-               let column = self.metadata.column(idx);
+               let column = self.row_group_metadata().column(idx);
                let (start, length) = column.byte_range();
                start..(start + length)
            })
@@ -333,7 +339,10 @@ impl<'a> RowGroupBase<'a> {
                continue;
            };

-           let column = self.metadata.column(idx);
+           let column = self
+               .parquet_metadata
+               .row_group(self.row_group_idx)
+               .column(idx);
            *chunk = Some(Arc::new(ColumnChunkData::Dense {
                offset: column.byte_range().0 as usize,
                data,
@@ -360,7 +369,7 @@ impl<'a> RowGroupBase<'a> {
            .map(|index| index[col_idx].page_locations.clone());
        SerializedPageReader::new(
            data.clone(),
-           self.metadata.column(col_idx),
+           self.row_group_metadata().column(col_idx),
            self.row_count,
            page_locations,
        )?
@@ -369,6 +378,14 @@ impl<'a> RowGroupBase<'a> {

        Ok(page_reader)
    }

+   pub(crate) fn parquet_metadata(&self) -> &ParquetMetaData {
+       self.parquet_metadata
+   }
+
+   pub(crate) fn row_group_metadata(&self) -> &RowGroupMetaData {
+       self.parquet_metadata().row_group(self.row_group_idx)
+   }
 }

 /// An in-memory collection of column chunks
@@ -599,6 +616,14 @@ impl RowGroups for InMemoryRowGroup<'_> {
            reader: Some(Ok(Box::new(page_reader))),
        }))
    }

+   fn row_groups(&self) -> Box<dyn Iterator<Item = &RowGroupMetaData> + '_> {
+       Box::new(std::iter::once(self.base.row_group_metadata()))
+   }
+
+   fn metadata(&self) -> &ParquetMetaData {
+       self.base.parquet_metadata()
+   }
 }

 /// An in-memory column chunk
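The `RowGroups` implementations now have to hand the reader their metadata, which is why `MemtableRowGroupPageFetcher` and `InMemoryRowGroup` both grow identical `row_groups`/`metadata` methods backed by the two accessors added to `RowGroupBase`. The underlying refactor, reduced to its shape: instead of borrowing a single `RowGroupMetaData`, the base keeps the whole-file metadata plus an index and derives the row-group view on demand (struct trimmed to the relevant fields):

```rust
use parquet::file::metadata::{ParquetMetaData, RowGroupMetaData};

struct Base<'a> {
    // Whole-file metadata replaces the old `metadata: &'a RowGroupMetaData`.
    parquet_metadata: &'a ParquetMetaData,
    row_group_idx: usize,
}

impl<'a> Base<'a> {
    fn parquet_metadata(&self) -> &ParquetMetaData {
        self.parquet_metadata
    }

    // The row-group view is derived, not stored, so the two accessors
    // stay consistent by construction.
    fn row_group_metadata(&self) -> &RowGroupMetaData {
        self.parquet_metadata().row_group(self.row_group_idx)
    }
}
```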
@@ -550,15 +550,10 @@ fn find_primary_keys(
    let columns_pk = columns
        .iter()
        .filter_map(|x| {
-           if x.options().iter().any(|o| {
-               matches!(
-                   o.option,
-                   ColumnOption::Unique {
-                       is_primary: true,
-                       ..
-                   }
-               )
-           }) {
+           if x.options()
+               .iter()
+               .any(|o| matches!(o.option, ColumnOption::PrimaryKey(_)))
+           {
                Some(x.name().value.clone())
            } else {
                None
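The upgraded sqlparser models an inline `PRIMARY KEY` as its own `ColumnOption::PrimaryKey` variant instead of `ColumnOption::Unique { is_primary: true, .. }`, which is what lets the filter above collapse to a single `matches!`. A standalone sketch written against sqlparser's `ColumnDef` directly (the crate's own column wrapper, used in the hunk, exposes `options()`/`name()` accessor methods instead of fields):

```rust
use sqlparser::ast::{ColumnDef, ColumnOption};

/// Returns the column's name if it is declared `PRIMARY KEY` inline.
fn primary_key_name(column: &ColumnDef) -> Option<String> {
    column
        .options
        .iter()
        .any(|def| matches!(def.option, ColumnOption::PrimaryKey(_)))
        .then(|| column.name.value.clone())
}
```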
@@ -238,12 +238,12 @@ impl Absent {

    pub fn to_execution_plan(&self, exec_input: Arc<dyn ExecutionPlan>) -> Arc<dyn ExecutionPlan> {
        let output_schema = Arc::new(self.output_schema.as_arrow().clone());
-       let properties = PlanProperties::new(
+       let properties = Arc::new(PlanProperties::new(
            EquivalenceProperties::new(output_schema.clone()),
            Partitioning::UnknownPartitioning(1),
            EmissionType::Incremental,
            Boundedness::Bounded,
-       );
+       ));
        Arc::new(AbsentExec {
            start: self.start,
            end: self.end,
@@ -323,7 +323,7 @@ pub struct AbsentExec {
    fake_labels: Vec<(String, String)>,
    output_schema: SchemaRef,
    input: Arc<dyn ExecutionPlan>,
-   properties: PlanProperties,
+   properties: Arc<PlanProperties>,
    metric: ExecutionPlanMetricsSet,
 }

@@ -336,7 +336,7 @@ impl ExecutionPlan for AbsentExec {
        self.output_schema.clone()
    }

-   fn properties(&self) -> &PlanProperties {
+   fn properties(&self) -> &Arc<PlanProperties> {
        &self.properties
    }

@@ -624,12 +624,12 @@ mod tests {
            fake_labels: vec![],
            output_schema: output_schema.clone(),
            input: Arc::new(memory_exec),
-           properties: PlanProperties::new(
+           properties: Arc::new(PlanProperties::new(
                EquivalenceProperties::new(output_schema.clone()),
                Partitioning::UnknownPartitioning(1),
                EmissionType::Incremental,
                Boundedness::Bounded,
-           ),
+           )),
            metric: ExecutionPlanMetricsSet::new(),
        };

@@ -692,12 +692,12 @@ mod tests {
            fake_labels: vec![],
            output_schema: output_schema.clone(),
            input: Arc::new(memory_exec),
-           properties: PlanProperties::new(
+           properties: Arc::new(PlanProperties::new(
                EquivalenceProperties::new(output_schema.clone()),
                Partitioning::UnknownPartitioning(1),
                EmissionType::Incremental,
                Boundedness::Bounded,
-           ),
+           )),
            metric: ExecutionPlanMetricsSet::new(),
        };

@@ -81,7 +81,7 @@ impl EmptyMetric {
    ) -> DataFusionResult<Self> {
        let qualifier = Some(TableReference::bare(""));
        let ts_only_schema = build_ts_only_schema(&time_index_column_name);
-       let mut fields = vec![(qualifier.clone(), Arc::new(ts_only_schema.field(0).clone()))];
+       let mut fields = vec![(qualifier.clone(), ts_only_schema.field(0).clone())];
        if let Some(field_expr) = &field_expr {
            let field_data_type = field_expr.get_type(&ts_only_schema)?;
            fields.push((
@@ -232,8 +232,8 @@ impl ExecutionPlan for EmptyMetricExec {
        self.result_schema.clone()
    }

-   fn properties(&self) -> &PlanProperties {
-       self.properties.as_ref()
+   fn properties(&self) -> &Arc<PlanProperties> {
+       &self.properties
    }

    fn maintains_input_order(&self) -> Vec<bool> {
@@ -296,7 +296,7 @@ impl HistogramFold {
        )) as _);

        let output_schema: SchemaRef = self.output_schema.inner().clone();
-       let properties = PlanProperties::new(
+       let properties = Arc::new(PlanProperties::new(
            EquivalenceProperties::new(output_schema.clone()),
            Partitioning::Hash(
                partition_exprs.clone(),
@@ -304,7 +304,7 @@ impl HistogramFold {
            ),
            EmissionType::Incremental,
            Boundedness::Bounded,
-       );
+       ));
        Arc::new(HistogramFoldExec {
            le_column_index,
            field_column_index,
@@ -416,7 +416,7 @@ pub struct HistogramFoldExec {
    partition_exprs: Vec<Arc<dyn PhysicalExpr>>,
    quantile: f64,
    metric: ExecutionPlanMetricsSet,
-   properties: PlanProperties,
+   properties: Arc<PlanProperties>,
 }

 impl ExecutionPlan for HistogramFoldExec {
@@ -424,7 +424,7 @@ impl ExecutionPlan for HistogramFoldExec {
        self
    }

-   fn properties(&self) -> &PlanProperties {
+   fn properties(&self) -> &Arc<PlanProperties> {
        &self.properties
    }

@@ -486,7 +486,7 @@ impl ExecutionPlan for HistogramFoldExec {
    ) -> DataFusionResult<Arc<dyn ExecutionPlan>> {
        assert!(!children.is_empty());
        let new_input = children[0].clone();
-       let properties = PlanProperties::new(
+       let properties = Arc::new(PlanProperties::new(
            EquivalenceProperties::new(self.output_schema.clone()),
            Partitioning::Hash(
                self.partition_exprs.clone(),
@@ -494,7 +494,7 @@ impl ExecutionPlan for HistogramFoldExec {
            ),
            EmissionType::Incremental,
            Boundedness::Bounded,
-       );
+       ));
        Ok(Arc::new(Self {
            input: new_input,
            metric: self.metric.clone(),
@@ -1241,7 +1241,7 @@ mod test {
    type PlanPropsResult = (
        Vec<Arc<dyn PhysicalExpr>>,
        Vec<Arc<dyn PhysicalExpr>>,
-       PlanProperties,
+       Arc<PlanProperties>,
    );

    fn build_test_plan_properties(
@@ -1282,7 +1282,7 @@ mod test {
            Boundedness::Bounded,
        );

-       (tag_columns, partition_exprs, properties)
+       (tag_columns, partition_exprs, Arc::new(properties))
    }

    #[tokio::test]
@@ -293,7 +293,7 @@ impl ExecutionPlan for InstantManipulateExec {
        self.input.schema()
    }

-   fn properties(&self) -> &PlanProperties {
+   fn properties(&self) -> &Arc<PlanProperties> {
        self.input.properties()
    }

@@ -273,6 +273,10 @@ impl ExecutionPlan for SeriesNormalizeExec {
    }

    fn required_input_distribution(&self) -> Vec<Distribution> {
+       if self.tag_columns.is_empty() {
+           return vec![Distribution::SinglePartition];
+       }
+
        let schema = self.input.schema();
        vec![Distribution::HashPartitioned(
            self.tag_columns
@@ -283,7 +287,7 @@ impl ExecutionPlan for SeriesNormalizeExec {
        )]
    }

-   fn properties(&self) -> &PlanProperties {
+   fn properties(&self) -> &Arc<PlanProperties> {
        self.input.properties()
    }

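`SeriesNormalizeExec` here and `SeriesDivideExec` later in the diff gain the same guard: when a query has no tag columns there is nothing to hash-partition by, and `Distribution::HashPartitioned(vec![])` would be degenerate, so the operator requires a single partition instead. A free-standing sketch of the guard (the real operators build `PhysicalExpr`s from `self.tag_columns` against the input schema; that step is abstracted into a closure here):

```rust
use std::sync::Arc;

use datafusion::physical_plan::Distribution;
use datafusion_physical_expr::PhysicalExpr;

fn required_input_distribution(
    tag_columns: &[String],
    exprs_for: impl Fn(&[String]) -> Vec<Arc<dyn PhysicalExpr>>,
) -> Vec<Distribution> {
    if tag_columns.is_empty() {
        // No series keys: ask for one partition rather than hashing
        // on an empty expression list.
        return vec![Distribution::SinglePartition];
    }
    vec![Distribution::HashPartitioned(exprs_for(tag_columns))]
}
```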
@@ -129,7 +129,7 @@ impl RangeManipulate {
        let mut new_columns = Vec::with_capacity(columns.len() + 1);
        for i in 0..columns.len() {
            let x = input_schema.qualified_field(i);
-           new_columns.push((x.0.cloned(), Arc::new(x.1.clone())));
+           new_columns.push((x.0.cloned(), x.1.clone()));
        }

        // process time index column
@@ -170,12 +170,12 @@ impl RangeManipulate {
    pub fn to_execution_plan(&self, exec_input: Arc<dyn ExecutionPlan>) -> Arc<dyn ExecutionPlan> {
        let output_schema: SchemaRef = self.output_schema.inner().clone();
        let properties = exec_input.properties();
-       let properties = PlanProperties::new(
+       let properties = Arc::new(PlanProperties::new(
            EquivalenceProperties::new(output_schema.clone()),
            properties.partitioning.clone(),
            properties.emission_type,
            properties.boundedness,
-       );
+       ));
        Arc::new(RangeManipulateExec {
            start: self.start,
            end: self.end,
@@ -421,7 +421,7 @@ pub struct RangeManipulateExec {
    input: Arc<dyn ExecutionPlan>,
    output_schema: SchemaRef,
    metric: ExecutionPlanMetricsSet,
-   properties: PlanProperties,
+   properties: Arc<PlanProperties>,
 }

 impl ExecutionPlan for RangeManipulateExec {
@@ -433,7 +433,7 @@ impl ExecutionPlan for RangeManipulateExec {
        self.output_schema.clone()
    }

-   fn properties(&self) -> &PlanProperties {
+   fn properties(&self) -> &Arc<PlanProperties> {
        &self.properties
    }

@@ -463,12 +463,12 @@ impl ExecutionPlan for RangeManipulateExec {
        assert!(!children.is_empty());
        let exec_input = children[0].clone();
        let properties = exec_input.properties();
-       let properties = PlanProperties::new(
+       let properties = Arc::new(PlanProperties::new(
            EquivalenceProperties::new(self.output_schema.clone()),
            properties.partitioning.clone(),
            properties.emission_type,
            properties.boundedness,
-       );
+       ));
        Ok(Arc::new(Self {
            start: self.start,
            end: self.end,
@@ -856,12 +856,12 @@ mod test {
                .as_arrow()
                .clone(),
        );
-       let properties = PlanProperties::new(
+       let properties = Arc::new(PlanProperties::new(
            EquivalenceProperties::new(manipulate_output_schema.clone()),
            Partitioning::UnknownPartitioning(1),
            EmissionType::Incremental,
            Boundedness::Bounded,
-       );
+       ));
        let normalize_exec = Arc::new(RangeManipulateExec {
            start,
            end,

@@ -94,7 +94,7 @@ impl ScalarCalculate {
        let qualifier = table_name.map(TableReference::bare);
        let schema = DFSchema::new_with_metadata(
            vec![
-               (qualifier.clone(), Arc::new(ts_field)),
+               (qualifier.clone(), ts_field),
                (qualifier, Arc::new(val_field)),
            ],
            input_schema.metadata().clone(),
@@ -139,12 +139,12 @@ impl ScalarCalculate {
            .map_err(|e| DataFusionError::ArrowError(Box::new(e), None))?;
        let schema = Arc::new(Schema::new(fields));
        let properties = exec_input.properties();
-       let properties = PlanProperties::new(
+       let properties = Arc::new(PlanProperties::new(
            EquivalenceProperties::new(schema.clone()),
            Partitioning::UnknownPartitioning(1),
            properties.emission_type,
            properties.boundedness,
-       );
+       ));
        Ok(Arc::new(ScalarCalculateExec {
            start: self.start,
            end: self.end,
@@ -384,7 +384,7 @@ struct ScalarCalculateExec {
    input: Arc<dyn ExecutionPlan>,
    tag_columns: Vec<String>,
    metric: ExecutionPlanMetricsSet,
-   properties: PlanProperties,
+   properties: Arc<PlanProperties>,
 }

 impl ExecutionPlan for ScalarCalculateExec {
@@ -396,7 +396,7 @@ impl ExecutionPlan for ScalarCalculateExec {
        self.schema.clone()
    }

-   fn properties(&self) -> &PlanProperties {
+   fn properties(&self) -> &Arc<PlanProperties> {
        &self.properties
    }

@@ -769,12 +769,12 @@ mod test {
            Field::new("ts", DataType::Timestamp(TimeUnit::Millisecond, None), true),
            Field::new("val", DataType::Float64, true),
        ]));
-       let properties = PlanProperties::new(
+       let properties = Arc::new(PlanProperties::new(
            EquivalenceProperties::new(schema.clone()),
            Partitioning::UnknownPartitioning(1),
            EmissionType::Incremental,
            Boundedness::Bounded,
-       );
+       ));
        let scalar_exec = Arc::new(ScalarCalculateExec {
            start: 0,
            end: 15_000,

@@ -389,11 +389,14 @@ impl ExecutionPlan for SeriesDivideExec {
        self.input.schema()
    }

-   fn properties(&self) -> &PlanProperties {
+   fn properties(&self) -> &Arc<PlanProperties> {
        self.input.properties()
    }

    fn required_input_distribution(&self) -> Vec<Distribution> {
+       if self.tag_columns.is_empty() {
+           return vec![Distribution::SinglePartition];
+       }
        let schema = self.input.schema();
        vec![Distribution::HashPartitioned(
            self.tag_columns

@@ -307,8 +307,8 @@ impl ExecutionPlan for UnionDistinctOnExec {
        vec![Distribution::SinglePartition, Distribution::SinglePartition]
    }

-   fn properties(&self) -> &PlanProperties {
-       self.properties.as_ref()
+   fn properties(&self) -> &Arc<PlanProperties> {
+       &self.properties
    }

    fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
@@ -14,9 +14,6 @@

 //!An extended "array" based on [DictionaryArray].

-use std::sync::Arc;
-
-use datafusion::arrow::buffer::NullBuffer;
 use datafusion::arrow::datatypes::Field;
 use datatypes::arrow::array::{Array, ArrayData, ArrayRef, DictionaryArray, Int64Array};
 use datatypes::arrow::datatypes::{DataType, Int64Type};
@@ -225,52 +222,6 @@ impl RangeArray {
    }
 }

-impl Array for RangeArray {
-    fn as_any(&self) -> &dyn std::any::Any {
-        self
-    }
-
-    fn into_data(self) -> ArrayData {
-        self.array.into_data()
-    }
-
-    fn to_data(&self) -> ArrayData {
-        self.array.to_data()
-    }
-
-    fn slice(&self, offset: usize, length: usize) -> ArrayRef {
-        Arc::new(self.array.slice(offset, length))
-    }
-
-    fn nulls(&self) -> Option<&NullBuffer> {
-        self.array.nulls()
-    }
-
-    fn data_type(&self) -> &DataType {
-        self.array.data_type()
-    }
-
-    fn len(&self) -> usize {
-        self.len()
-    }
-
-    fn is_empty(&self) -> bool {
-        self.is_empty()
-    }
-
-    fn offset(&self) -> usize {
-        self.array.offset()
-    }
-
-    fn get_buffer_memory_size(&self) -> usize {
-        self.array.get_buffer_memory_size()
-    }
-
-    fn get_array_memory_size(&self) -> usize {
-        self.array.get_array_memory_size()
-    }
-}
-
 impl std::fmt::Debug for RangeArray {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let ranges = self
@@ -49,7 +49,7 @@ const PLAN: &str = "plan";
 pub struct DistAnalyzeExec {
    input: Arc<dyn ExecutionPlan>,
    schema: SchemaRef,
-   properties: PlanProperties,
+   properties: Arc<PlanProperties>,
    verbose: bool,
    format: AnalyzeFormat,
 }
@@ -62,7 +62,7 @@ impl DistAnalyzeExec {
            Field::new(NODE, DataType::UInt32, true),
            Field::new(PLAN, DataType::Utf8, true),
        ]));
-       let properties = Self::compute_properties(&input, schema.clone());
+       let properties = Arc::new(Self::compute_properties(&input, schema.clone()));
        Self {
            input,
            schema,
@@ -108,7 +108,7 @@ impl ExecutionPlan for DistAnalyzeExec {
        self
    }

-   fn properties(&self) -> &PlanProperties {
+   fn properties(&self) -> &Arc<PlanProperties> {
        &self.properties
    }

@@ -46,7 +46,7 @@ pub(crate) struct PatchOptimizerContext {
 }

 impl OptimizerConfig for PatchOptimizerContext {
-   fn query_execution_start_time(&self) -> DateTime<Utc> {
+   fn query_execution_start_time(&self) -> Option<DateTime<Utc>> {
        self.inner.query_execution_start_time()
    }

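In the upgraded DataFusion, `OptimizerConfig::query_execution_start_time` returns `Option<DateTime<Utc>>` (a start time may simply be absent), so a pass-through wrapper like `PatchOptimizerContext` only needs its signature changed. The delegation shape, with a stand-in trait so the sketch stays self-contained rather than depending on the full `OptimizerConfig` trait:

```rust
use chrono::{DateTime, Utc};

// Stand-in for the one OptimizerConfig method that changed.
trait StartTime {
    fn query_execution_start_time(&self) -> Option<DateTime<Utc>>;
}

struct PatchContext<C: StartTime> {
    inner: C,
}

impl<C: StartTime> PatchContext<C> {
    // Previously `-> DateTime<Utc>`; the wrapper just forwards.
    fn query_execution_start_time(&self) -> Option<DateTime<Utc>> {
        self.inner.query_execution_start_time()
    }
}
```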
@@ -334,7 +334,8 @@ impl Categorizer {
            Expr::Unnest(_)
            | Expr::GroupingSet(_)
            | Expr::Placeholder(_)
-           | Expr::OuterReferenceColumn(_, _) => Commutativity::Unimplemented,
+           | Expr::OuterReferenceColumn(_, _)
+           | Expr::SetComparison(_) => Commutativity::Unimplemented,
        }
    }

@@ -139,7 +139,7 @@ pub struct MergeScanExec {
    arrow_schema: ArrowSchemaRef,
    region_query_handler: RegionQueryHandlerRef,
    metric: ExecutionPlanMetricsSet,
-   properties: PlanProperties,
+   properties: Arc<PlanProperties>,
    /// Metrics from sub stages
    sub_stage_metrics: Arc<Mutex<HashMap<RegionId, RecordBatchMetrics>>>,
    /// Metrics for each partition
@@ -226,12 +226,12 @@ impl MergeScanExec {
            .collect();
        let partitioning = Partitioning::Hash(partition_exprs, target_partition);

-       let properties = PlanProperties::new(
+       let properties = Arc::new(PlanProperties::new(
            eq_properties,
            partitioning,
            EmissionType::Incremental,
            Boundedness::Bounded,
-       );
+       ));
        Ok(Self {
            table,
            regions,
@@ -453,12 +453,12 @@ impl MergeScanExec {
            arrow_schema: self.arrow_schema.clone(),
            region_query_handler: self.region_query_handler.clone(),
            metric: self.metric.clone(),
-           properties: PlanProperties::new(
+           properties: Arc::new(PlanProperties::new(
                self.properties.eq_properties.clone(),
                Partitioning::Hash(overlaps, self.target_partition),
                self.properties.emission_type,
                self.properties.boundedness,
-           ),
+           )),
            sub_stage_metrics: self.sub_stage_metrics.clone(),
            partition_metrics: self.partition_metrics.clone(),
            query_ctx: self.query_ctx.clone(),
@@ -584,7 +584,7 @@ impl ExecutionPlan for MergeScanExec {
        self.arrow_schema.clone()
    }

-   fn properties(&self) -> &PlanProperties {
+   fn properties(&self) -> &Arc<PlanProperties> {
        &self.properties
    }

@@ -729,9 +729,9 @@ mod tests {
        };

        let plan = planner.query_to_plan(log_query).await.unwrap();
-       let expected = "Limit: skip=0, fetch=100 [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]\
-       \n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") AND greptime.public.test_table.message LIKE Utf8(\"%error%\") [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]\
-       \n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]";
+       let expected = "Limit: skip=0, fetch=100 [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]\
+       \n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") AND greptime.public.test_table.message LIKE Utf8(\"%error%\") [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]\
+       \n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]";

        assert_eq!(plan.display_indent_schema().to_string(), expected);
    }
@@ -850,9 +850,9 @@ mod tests {
        };

        let plan = planner.query_to_plan(log_query).await.unwrap();
-       let expected = "Limit: skip=10, fetch=1000 [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]\
-       \n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") AND greptime.public.test_table.message LIKE Utf8(\"%error%\") [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]\
-       \n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]";
+       let expected = "Limit: skip=10, fetch=1000 [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]\
+       \n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") AND greptime.public.test_table.message LIKE Utf8(\"%error%\") [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]\
+       \n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]";

        assert_eq!(plan.display_indent_schema().to_string(), expected);
    }
@@ -885,9 +885,9 @@ mod tests {
        };

        let plan = planner.query_to_plan(log_query).await.unwrap();
-       let expected = "Limit: skip=0, fetch=1000 [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]\
-       \n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") AND greptime.public.test_table.message LIKE Utf8(\"%error%\") [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]\
-       \n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]";
+       let expected = "Limit: skip=0, fetch=1000 [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]\
+       \n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") AND greptime.public.test_table.message LIKE Utf8(\"%error%\") [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]\
+       \n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]";

        assert_eq!(plan.display_indent_schema().to_string(), expected);
    }
@@ -933,9 +933,9 @@ mod tests {

        let plan = planner.query_to_plan(log_query).await.unwrap();
        let expected = "Aggregate: groupBy=[[greptime.public.test_table.host]], aggr=[[count(greptime.public.test_table.message) AS count_result]] [host:Utf8;N, count_result:Int64]\
-       \n Limit: skip=0, fetch=100 [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]\
-       \n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]\
-       \n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]";
+       \n Limit: skip=0, fetch=100 [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]\
+       \n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]\
+       \n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]";

        assert_eq!(plan.display_indent_schema().to_string(), expected);
    }
@@ -964,18 +964,18 @@ mod tests {
            exprs: vec![LogExpr::ScalarFunc {
                name: "date_trunc".to_string(),
                args: vec![
-                   LogExpr::NamedIdent("timestamp".to_string()),
                    LogExpr::Literal("day".to_string()),
+                   LogExpr::NamedIdent("timestamp".to_string()),
                ],
                alias: Some("time_bucket".to_string()),
            }],
        };

        let plan = planner.query_to_plan(log_query).await.unwrap();
-       let expected = "Projection: date_trunc(greptime.public.test_table.timestamp, Utf8(\"day\")) AS time_bucket [time_bucket:Timestamp(Nanosecond, None);N]\
-       \n Limit: skip=0, fetch=100 [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]\
-       \n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]\
-       \n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]";
+       let expected = "Projection: date_trunc(Utf8(\"day\"), greptime.public.test_table.timestamp) AS time_bucket [time_bucket:Timestamp(ms)]\
+       \n Limit: skip=0, fetch=100 [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]\
+       \n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]\
+       \n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]";

        assert_eq!(plan.display_indent_schema().to_string(), expected);
    }
@@ -1055,11 +1055,11 @@ mod tests {
        };

        let plan = planner.query_to_plan(log_query).await.unwrap();
-       let expected = "Aggregate: groupBy=[[2__date_histogram__time_bucket]], aggr=[[count(2__date_histogram__time_bucket) AS count_result]] [2__date_histogram__time_bucket:Timestamp(Nanosecond, None);N, count_result:Int64]\
-       \n Projection: date_bin(Utf8(\"30 seconds\"), greptime.public.test_table.timestamp) AS 2__date_histogram__time_bucket [2__date_histogram__time_bucket:Timestamp(Nanosecond, None);N]\
-       \n Limit: skip=0, fetch=1000 [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]\
-       \n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]\
-       \n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N, is_active:Boolean;N]";
+       let expected = "Aggregate: groupBy=[[2__date_histogram__time_bucket]], aggr=[[count(2__date_histogram__time_bucket) AS count_result]] [2__date_histogram__time_bucket:Timestamp(ns);N, count_result:Int64]\
+       \n Projection: date_bin(Utf8(\"30 seconds\"), greptime.public.test_table.timestamp) AS 2__date_histogram__time_bucket [2__date_histogram__time_bucket:Timestamp(ns);N]\
+       \n Limit: skip=0, fetch=1000 [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]\
+       \n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]\
+       \n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(ms), host:Utf8;N, is_active:Boolean;N]";

        assert_eq!(plan.display_indent_schema().to_string(), expected);
    }

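Two things change in the expected plans above. First, the `date_trunc` test input is fixed so the precision string comes before the column, matching DataFusion's `date_trunc(precision, expression)` argument order, which also changes the projected result type. Second, across every test: the upgraded Arrow prints timestamp types compactly, `Timestamp(ms)`/`Timestamp(ns)` instead of `Timestamp(Millisecond, None)`/`Timestamp(Nanosecond, None)` (the trailing `;N` nullability marker is unchanged). A one-liner to check the rendering against whatever Arrow version the lockfile resolves (the exact output is an expectation to verify, not a guarantee):

```rust
use arrow_schema::{DataType, TimeUnit};

fn main() {
    // Expected to print "Timestamp(ms)" on the upgraded Arrow.
    println!("{}", DataType::Timestamp(TimeUnit::Millisecond, None));
}
```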
@@ -23,7 +23,7 @@ use datafusion::config::ConfigOptions;
 use datafusion::error::Result as DfResult;
 use datafusion::physical_optimizer::PhysicalOptimizerRule;
 use datafusion::physical_plan::ExecutionPlan;
-use datafusion::physical_plan::filter::FilterExec;
+use datafusion::physical_plan::filter::{FilterExec, FilterExecBuilder};
 use datafusion_common::ScalarValue;
 use datafusion_common::tree_node::{Transformed, TreeNode};
 use datafusion_expr::ColumnarValue;
@@ -205,9 +205,10 @@ impl PhysicalOptimizerRule for MatchesConstantTermOptimizer {
                })?;

                if new_pred.transformed {
-                   let exec = FilterExec::try_new(new_pred.data, filter.input().clone())?
-                       .with_default_selectivity(filter.default_selectivity())?
-                       .with_projection(filter.projection().cloned())?;
+                   let exec = FilterExecBuilder::new(new_pred.data, filter.input().clone())
+                       .with_default_selectivity(filter.default_selectivity())
+                       .apply_projection_by_ref(filter.projection().as_ref())
+                       .and_then(|x| x.build())?;
                    return Ok(Transformed::yes(Arc::new(exec) as _));
                }
            }

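`FilterExec` construction moves to a builder in this release; settings accumulate infallibly except for the projection step, hence the `and_then` before the final `build()`. The migration shape, mirroring the hunk above (`predicate`, `input`, `selectivity`, and `projection` stand in for the optimizer's locals):

```rust
// Before: construct, then apply each setter, each fallible on its own.
let exec = FilterExec::try_new(predicate.clone(), input.clone())?
    .with_default_selectivity(selectivity)?
    .with_projection(projection.cloned())?;

// After: accumulate on the builder and fail at a single seam.
let exec = FilterExecBuilder::new(predicate, input)
    .with_default_selectivity(selectivity)
    .apply_projection_by_ref(projection.as_ref())
    .and_then(|builder| builder.build())?;
```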
@@ -17,13 +17,12 @@ use std::sync::Arc;
 use datafusion::config::ConfigOptions;
 use datafusion::physical_optimizer::PhysicalOptimizerRule;
 use datafusion::physical_plan::ExecutionPlan;
-use datafusion::physical_plan::coalesce_batches::CoalesceBatchesExec;
 use datafusion::physical_plan::repartition::RepartitionExec;
 use datafusion_common::Result as DfResult;
 use datafusion_common::tree_node::{Transformed, TreeNode};

 /// This is [PhysicalOptimizerRule] to remove duplicate physical plans such as two
-/// adjoining [CoalesceBatchesExec] or [RepartitionExec]. They won't have any effect
+/// adjoining [RepartitionExec]. They won't have any effect
 /// if one runs right after another.
 ///
 /// This rule is expected to be run in the final stage of the optimization process.
@@ -52,9 +51,7 @@ impl RemoveDuplicate {
    fn do_optimize(plan: Arc<dyn ExecutionPlan>) -> DfResult<Arc<dyn ExecutionPlan>> {
        let result = plan
            .transform_down(|plan| {
-               if plan.as_any().is::<CoalesceBatchesExec>()
-                   || plan.as_any().is::<RepartitionExec>()
-               {
+               if plan.as_any().is::<RepartitionExec>() {
                    // check child
                    let child = plan.children()[0].clone();
                    if child.as_any().type_id() == plan.as_any().type_id() {
@@ -72,49 +69,3 @@ impl RemoveDuplicate {
        Ok(result)
    }
 }
-
-#[cfg(test)]
-mod test {
-    use std::sync::Arc;
-
-    use arrow_schema::Schema;
-    use datafusion::physical_plan::displayable;
-    use datafusion::physical_plan::empty::EmptyExec;
-    use datafusion_physical_expr::Partitioning;
-
-    use super::*;
-
-    #[test]
-    fn remove_coalesce_batches() {
-        let empty = Arc::new(EmptyExec::new(Arc::new(Schema::empty())));
-        let coalesce_batches = Arc::new(CoalesceBatchesExec::new(empty, 1024));
-        let another_coalesce_batches = Arc::new(CoalesceBatchesExec::new(coalesce_batches, 8192));
-
-        let optimized = RemoveDuplicate::do_optimize(another_coalesce_batches).unwrap();
-        let formatted = displayable(optimized.as_ref()).indent(true).to_string();
-        let expected = "CoalesceBatchesExec: target_batch_size=8192\
-        \n EmptyExec\n";
-
-        assert_eq!(expected, formatted);
-    }
-
-    #[test]
-    fn non_continuous_coalesce_batches() {
-        let empty = Arc::new(EmptyExec::new(Arc::new(Schema::empty())));
-        let coalesce_batches = Arc::new(CoalesceBatchesExec::new(empty, 1024));
-        let repartition = Arc::new(
-            RepartitionExec::try_new(coalesce_batches, Partitioning::UnknownPartitioning(1))
-                .unwrap(),
-        );
-        let another_coalesce_batches = Arc::new(CoalesceBatchesExec::new(repartition, 8192));
-
-        let optimized = RemoveDuplicate::do_optimize(another_coalesce_batches).unwrap();
-        let formatted = displayable(optimized.as_ref()).indent(true).to_string();
-        let expected = "CoalesceBatchesExec: target_batch_size=8192\
-        \n RepartitionExec: partitioning=UnknownPartitioning(1), input_partitions=1\
-        \n CoalesceBatchesExec: target_batch_size=1024\
-        \n EmptyExec\n";
-
-        assert_eq!(expected, formatted);
-    }
-}
@@ -17,7 +17,6 @@ use std::sync::Arc;

 use datafusion::physical_optimizer::PhysicalOptimizerRule;
 use datafusion::physical_plan::ExecutionPlan;
-use datafusion::physical_plan::coalesce_batches::CoalesceBatchesExec;
 use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
 use datafusion::physical_plan::coop::CooperativeExec;
 use datafusion::physical_plan::filter::FilterExec;
@@ -80,8 +79,6 @@ impl WindowedSortPhysicalRule {
            let preserve_partitioning = sort_exec.preserve_partitioning();

            let sort_input = remove_repartition(sort_exec.input().clone())?.data;
-           let sort_input =
-               remove_coalesce_batches_exec(sort_input, sort_exec.fetch())?.data;

            // Gets scanner info from the input without repartition before filter.
            let Some(scanner_info) = fetch_partition_range(sort_input.clone())? else {
@@ -164,7 +161,6 @@ fn fetch_partition_range(input: Arc<dyn ExecutionPlan>) -> DataFusionResult<Opti
    let mut time_index = HashSet::new();
    let mut alias_map = Vec::new();
    let mut tag_columns = None;
-   let mut is_batch_coalesced = false;

    input.transform_up(|plan| {
        if plan.as_any().is::<CooperativeExec>() {
@@ -180,16 +176,9 @@ fn fetch_partition_range(input: Arc<dyn ExecutionPlan>) -> DataFusionResult<Opti
            partition_ranges = None;
        }

-       if plan.as_any().is::<CoalesceBatchesExec>() {
-           is_batch_coalesced = true;
-       }
-
        // only a very limited set of plans can exist between region scan and sort exec
        // other plans might make this optimize wrong, so be safe here by limiting it
-       if !(plan.as_any().is::<ProjectionExec>()
-           || plan.as_any().is::<FilterExec>()
-           || plan.as_any().is::<CoalesceBatchesExec>())
-       {
+       if !(plan.as_any().is::<ProjectionExec>() || plan.as_any().is::<FilterExec>()) {
            partition_ranges = None;
        }

@@ -219,10 +208,7 @@ fn fetch_partition_range(input: Arc<dyn ExecutionPlan>) -> DataFusionResult<Opti
            time_index = HashSet::from([region_scan_exec.time_index()]);
            tag_columns = Some(region_scan_exec.tag_columns());

-           // set distinguish_partition_ranges to true, this is an incorrect workaround
-           if !is_batch_coalesced {
-               region_scan_exec.with_distinguish_partition_range(true);
-           }
+           region_scan_exec.with_distinguish_partition_range(true);
        }

        Ok(Transformed::no(plan))
@@ -260,33 +246,6 @@ fn remove_repartition(
    })
 }

-/// Remove `CoalesceBatchesExec` if the limit is less than the batch size.
-///
-/// so that if limit is too small we can avoid need to scan for more rows than necessary
-fn remove_coalesce_batches_exec(
-    plan: Arc<dyn ExecutionPlan>,
-    fetch: Option<usize>,
-) -> DataFusionResult<Transformed<Arc<dyn ExecutionPlan>>> {
-    let Some(fetch) = fetch else {
-        return Ok(Transformed::no(plan));
-    };
-
-    // Avoid removing multiple coalesce batches
-    let mut is_done = false;
-
-    plan.transform_down(|plan| {
-        if let Some(coalesce_batches_exec) = plan.as_any().downcast_ref::<CoalesceBatchesExec>() {
-            let target_batch_size = coalesce_batches_exec.target_batch_size();
-            if fetch < target_batch_size && !is_done {
-                is_done = true;
-                return Ok(Transformed::yes(coalesce_batches_exec.input().clone()));
-            }
-        }
-
-        Ok(Transformed::no(plan))
-    })
-}
-
 /// Resolves alias of the time index column.
 ///
 /// i.e if a is time index, alias= {a:b, b:c}, then result should be {a, b}(not {a, c}) because projection is not transitive
@@ -114,7 +114,7 @@ pub struct PartSortExec {
    /// Execution metrics
    metrics: ExecutionPlanMetricsSet,
    partition_ranges: Vec<Vec<PartitionRange>>,
-   properties: PlanProperties,
+   properties: Arc<PlanProperties>,
    /// Filter matching the state of the sort for dynamic filter pushdown.
    /// If `limit` is `Some`, this will also be set and a TopK operator may be used.
    /// If `limit` is `None`, this will be `None`.
@@ -132,12 +132,12 @@ impl PartSortExec {

        let metrics = ExecutionPlanMetricsSet::new();
        let properties = input.properties();
-       let properties = PlanProperties::new(
+       let properties = Arc::new(PlanProperties::new(
            input.equivalence_properties().clone(),
            input.output_partitioning().clone(),
            properties.emission_type,
            properties.boundedness,
-       );
+       ));

        let filter = limit
            .is_some()
@@ -220,7 +220,7 @@ impl ExecutionPlan for PartSortExec {
        self.input.schema()
    }

-   fn properties(&self) -> &PlanProperties {
+   fn properties(&self) -> &Arc<PlanProperties> {
        &self.properties
    }

@@ -4365,13 +4365,13 @@ mod test {
|
||||
.unwrap();
|
||||
|
||||
let expected = String::from(
|
||||
"Filter: TEMPLATE(field_0) IS NOT NULL [timestamp:Timestamp(Millisecond, None), TEMPLATE(field_0):Float64;N, tag_0:Utf8]\
|
||||
\n Projection: some_metric.timestamp, TEMPLATE(some_metric.field_0) AS TEMPLATE(field_0), some_metric.tag_0 [timestamp:Timestamp(Millisecond, None), TEMPLATE(field_0):Float64;N, tag_0:Utf8]\
|
||||
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
|
||||
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
|
||||
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
|
||||
\n Filter: some_metric.tag_0 != Utf8(\"bar\") AND some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
|
||||
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]"
|
||||
"Filter: TEMPLATE(field_0) IS NOT NULL [timestamp:Timestamp(ms), TEMPLATE(field_0):Float64;N, tag_0:Utf8]\
|
||||
\n Projection: some_metric.timestamp, TEMPLATE(some_metric.field_0) AS TEMPLATE(field_0), some_metric.tag_0 [timestamp:Timestamp(ms), TEMPLATE(field_0):Float64;N, tag_0:Utf8]\
|
||||
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
|
||||
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
|
||||
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
|
||||
\n Filter: some_metric.tag_0 != Utf8(\"bar\") AND some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
|
||||
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]"
|
||||
).replace("TEMPLATE", plan_name);
|
||||
|
||||
assert_eq!(plan.display_indent_schema().to_string(), expected);
|
||||
@@ -4574,13 +4574,13 @@ mod test {
|
||||
.await
|
||||
.unwrap();
|
||||
let expected_no_without = String::from(
|
||||
"Sort: some_metric.tag_1 ASC NULLS LAST, some_metric.timestamp ASC NULLS LAST [tag_1:Utf8, timestamp:Timestamp(Millisecond, None), TEMPLATE(some_metric.field_0):Float64;N, TEMPLATE(some_metric.field_1):Float64;N]\
|
||||
\n Aggregate: groupBy=[[some_metric.tag_1, some_metric.timestamp]], aggr=[[TEMPLATE(some_metric.field_0), TEMPLATE(some_metric.field_1)]] [tag_1:Utf8, timestamp:Timestamp(Millisecond, None), TEMPLATE(some_metric.field_0):Float64;N, TEMPLATE(some_metric.field_1):Float64;N]\
|
||||
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
|
||||
\n PromSeriesDivide: tags=[\"tag_0\", \"tag_1\"] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
|
||||
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.tag_1 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
|
||||
\n Filter: some_metric.tag_0 != Utf8(\"bar\") AND some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
|
||||
\n TableScan: some_metric [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]"
|
||||
"Sort: some_metric.tag_1 ASC NULLS LAST, some_metric.timestamp ASC NULLS LAST [tag_1:Utf8, timestamp:Timestamp(ms), TEMPLATE(some_metric.field_0):Float64;N, TEMPLATE(some_metric.field_1):Float64;N]\
|
||||
\n Aggregate: groupBy=[[some_metric.tag_1, some_metric.timestamp]], aggr=[[TEMPLATE(some_metric.field_0), TEMPLATE(some_metric.field_1)]] [tag_1:Utf8, timestamp:Timestamp(ms), TEMPLATE(some_metric.field_0):Float64;N, TEMPLATE(some_metric.field_1):Float64;N]\
|
||||
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N]\
|
||||
\n PromSeriesDivide: tags=[\"tag_0\", \"tag_1\"] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N]\
|
||||
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.tag_1 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N]\
|
||||
\n Filter: some_metric.tag_0 != Utf8(\"bar\") AND some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N]\
|
||||
\n TableScan: some_metric [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N]"
|
||||
).replace("TEMPLATE", plan_name);
|
||||
assert_eq!(
|
||||
plan.display_indent_schema().to_string(),
|
||||
@@ -4604,13 +4604,13 @@ mod test {
|
||||
.await
|
||||
.unwrap();
|
||||
let expected_without = String::from(
|
||||
"Sort: some_metric.tag_0 ASC NULLS LAST, some_metric.timestamp ASC NULLS LAST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), TEMPLATE(some_metric.field_0):Float64;N, TEMPLATE(some_metric.field_1):Float64;N]\
|
||||
\n Aggregate: groupBy=[[some_metric.tag_0, some_metric.timestamp]], aggr=[[TEMPLATE(some_metric.field_0), TEMPLATE(some_metric.field_1)]] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), TEMPLATE(some_metric.field_0):Float64;N, TEMPLATE(some_metric.field_1):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\", \"tag_1\"] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.tag_1 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
\n Filter: some_metric.tag_0 != Utf8(\"bar\") AND some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]"
"Sort: some_metric.tag_0 ASC NULLS LAST, some_metric.timestamp ASC NULLS LAST [tag_0:Utf8, timestamp:Timestamp(ms), TEMPLATE(some_metric.field_0):Float64;N, TEMPLATE(some_metric.field_1):Float64;N]\
\n Aggregate: groupBy=[[some_metric.tag_0, some_metric.timestamp]], aggr=[[TEMPLATE(some_metric.field_0), TEMPLATE(some_metric.field_1)]] [tag_0:Utf8, timestamp:Timestamp(ms), TEMPLATE(some_metric.field_0):Float64;N, TEMPLATE(some_metric.field_1):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\", \"tag_1\"] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.tag_1 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N]\
\n Filter: some_metric.tag_0 != Utf8(\"bar\") AND some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N]"
).replace("TEMPLATE", plan_name);
assert_eq!(plan.display_indent_schema().to_string(), expected_without);
}
@@ -5167,20 +5167,20 @@ mod test {
.unwrap();
let expected = String::from(
"Projection: rhs.tag_0, rhs.timestamp, lhs.field_0 + rhs.field_0 AS lhs.field_0 + rhs.field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), lhs.field_0 + rhs.field_0:Float64;N]\
\n Inner Join: lhs.tag_0 = rhs.tag_0, lhs.timestamp = rhs.timestamp [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n SubqueryAlias: lhs [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Filter: some_metric.tag_0 = Utf8(\"foo\") AND some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n SubqueryAlias: rhs [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Filter: some_metric.tag_0 = Utf8(\"bar\") AND some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]",
"Projection: rhs.tag_0, rhs.timestamp, lhs.field_0 + rhs.field_0 AS lhs.field_0 + rhs.field_0 [tag_0:Utf8, timestamp:Timestamp(ms), lhs.field_0 + rhs.field_0:Float64;N]\
\n Inner Join: lhs.tag_0 = rhs.tag_0, lhs.timestamp = rhs.timestamp [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n SubqueryAlias: lhs [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Filter: some_metric.tag_0 = Utf8(\"foo\") AND some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n SubqueryAlias: rhs [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Filter: some_metric.tag_0 = Utf8(\"bar\") AND some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]",
);
assert_eq!(plan.display_indent_schema().to_string(), expected);
@@ -5222,12 +5222,12 @@ mod test {
async fn binary_op_literal_column() {
let query = r#"1 + some_metric{tag_0="bar"}"#;
let expected = String::from(
"Projection: some_metric.tag_0, some_metric.timestamp, Float64(1) + some_metric.field_0 AS Float64(1) + field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), Float64(1) + field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Filter: some_metric.tag_0 = Utf8(\"bar\") AND some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]",
"Projection: some_metric.tag_0, some_metric.timestamp, Float64(1) + some_metric.field_0 AS Float64(1) + field_0 [tag_0:Utf8, timestamp:Timestamp(ms), Float64(1) + field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Filter: some_metric.tag_0 = Utf8(\"bar\") AND some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]",
);
indie_query_plan_compare(query, expected).await;
@@ -5236,8 +5236,8 @@ mod test {
#[tokio::test]
async fn binary_op_literal_literal() {
let query = r#"1 + 1"#;
let expected = r#"EmptyMetric: range=[0..100000000], interval=[5000] [time:Timestamp(Millisecond, None), value:Float64;N]
TableScan: dummy [time:Timestamp(Millisecond, None), value:Float64;N]"#;
let expected = r#"EmptyMetric: range=[0..100000000], interval=[5000] [time:Timestamp(ms), value:Float64;N]
TableScan: dummy [time:Timestamp(ms), value:Float64;N]"#;
indie_query_plan_compare(query, expected).await;
}
@@ -5245,12 +5245,12 @@ mod test {
async fn simple_bool_grammar() {
let query = "some_metric != bool 1.2345";
let expected = String::from(
"Projection: some_metric.tag_0, some_metric.timestamp, CAST(some_metric.field_0 != Float64(1.2345) AS Float64) AS field_0 != Float64(1.2345) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0 != Float64(1.2345):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]",
"Projection: some_metric.tag_0, some_metric.timestamp, CAST(some_metric.field_0 != Float64(1.2345) AS Float64) AS field_0 != Float64(1.2345) [tag_0:Utf8, timestamp:Timestamp(ms), field_0 != Float64(1.2345):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]",
);
indie_query_plan_compare(query, expected).await;
@@ -5260,12 +5260,12 @@ mod test {
async fn bool_with_additional_arithmetic() {
let query = "some_metric + (1 == bool 2)";
let expected = String::from(
"Projection: some_metric.tag_0, some_metric.timestamp, some_metric.field_0 + CAST(Float64(1) = Float64(2) AS Float64) AS field_0 + Float64(1) = Float64(2) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0 + Float64(1) = Float64(2):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]",
"Projection: some_metric.tag_0, some_metric.timestamp, some_metric.field_0 + CAST(Float64(1) = Float64(2) AS Float64) AS field_0 + Float64(1) = Float64(2) [tag_0:Utf8, timestamp:Timestamp(ms), field_0 + Float64(1) = Float64(2):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]",
);
indie_query_plan_compare(query, expected).await;
@@ -5275,12 +5275,12 @@ mod test {
async fn simple_unary() {
let query = "-some_metric";
let expected = String::from(
"Projection: some_metric.tag_0, some_metric.timestamp, (- some_metric.field_0) AS (- field_0) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), (- field_0):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]",
"Projection: some_metric.tag_0, some_metric.timestamp, (- some_metric.field_0) AS (- field_0) [tag_0:Utf8, timestamp:Timestamp(ms), (- field_0):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]",
);
indie_query_plan_compare(query, expected).await;
@@ -5290,14 +5290,14 @@ mod test {
async fn increase_aggr() {
let query = "increase(some_metric[5m])";
let expected = String::from(
"Filter: prom_increase(timestamp_range,field_0,timestamp,Int64(300000)) IS NOT NULL [timestamp:Timestamp(Millisecond, None), prom_increase(timestamp_range,field_0,timestamp,Int64(300000)):Float64;N, tag_0:Utf8]\
\n Projection: some_metric.timestamp, prom_increase(timestamp_range, field_0, some_metric.timestamp, Int64(300000)) AS prom_increase(timestamp_range,field_0,timestamp,Int64(300000)), some_metric.tag_0 [timestamp:Timestamp(Millisecond, None), prom_increase(timestamp_range,field_0,timestamp,Int64(300000)):Float64;N, tag_0:Utf8]\
\n PromRangeManipulate: req range=[0..100000000], interval=[5000], eval range=[300000], time index=[timestamp], values=[\"field_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Dictionary(Int64, Float64);N, timestamp_range:Dictionary(Int64, Timestamp(Millisecond, None))]\
\n PromSeriesNormalize: offset=[0], time index=[timestamp], filter NaN: [true] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-299999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]",
"Filter: prom_increase(timestamp_range,field_0,timestamp,Int64(300000)) IS NOT NULL [timestamp:Timestamp(ms), prom_increase(timestamp_range,field_0,timestamp,Int64(300000)):Float64;N, tag_0:Utf8]\
\n Projection: some_metric.timestamp, prom_increase(timestamp_range, field_0, some_metric.timestamp, Int64(300000)) AS prom_increase(timestamp_range,field_0,timestamp,Int64(300000)), some_metric.tag_0 [timestamp:Timestamp(ms), prom_increase(timestamp_range,field_0,timestamp,Int64(300000)):Float64;N, tag_0:Utf8]\
\n PromRangeManipulate: req range=[0..100000000], interval=[5000], eval range=[300000], time index=[timestamp], values=[\"field_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Dictionary(Int64, Float64);N, timestamp_range:Dictionary(Int64, Timestamp(ms))]\
\n PromSeriesNormalize: offset=[0], time index=[timestamp], filter NaN: [true] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-299999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]",
);
indie_query_plan_compare(query, expected).await;
@@ -5307,12 +5307,12 @@ mod test {
async fn less_filter_on_value() {
let query = "some_metric < 1.2345";
let expected = String::from(
"Filter: some_metric.field_0 < Float64(1.2345) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]",
"Filter: some_metric.field_0 < Float64(1.2345) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]",
);
indie_query_plan_compare(query, expected).await;
@@ -5322,14 +5322,14 @@ mod test {
async fn count_over_time() {
let query = "count_over_time(some_metric[5m])";
let expected = String::from(
"Filter: prom_count_over_time(timestamp_range,field_0) IS NOT NULL [timestamp:Timestamp(Millisecond, None), prom_count_over_time(timestamp_range,field_0):Float64;N, tag_0:Utf8]\
\n Projection: some_metric.timestamp, prom_count_over_time(timestamp_range, field_0) AS prom_count_over_time(timestamp_range,field_0), some_metric.tag_0 [timestamp:Timestamp(Millisecond, None), prom_count_over_time(timestamp_range,field_0):Float64;N, tag_0:Utf8]\
\n PromRangeManipulate: req range=[0..100000000], interval=[5000], eval range=[300000], time index=[timestamp], values=[\"field_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Dictionary(Int64, Float64);N, timestamp_range:Dictionary(Int64, Timestamp(Millisecond, None))]\
\n PromSeriesNormalize: offset=[0], time index=[timestamp], filter NaN: [true] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-299999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]",
"Filter: prom_count_over_time(timestamp_range,field_0) IS NOT NULL [timestamp:Timestamp(ms), prom_count_over_time(timestamp_range,field_0):Float64;N, tag_0:Utf8]\
\n Projection: some_metric.timestamp, prom_count_over_time(timestamp_range, field_0) AS prom_count_over_time(timestamp_range,field_0), some_metric.tag_0 [timestamp:Timestamp(ms), prom_count_over_time(timestamp_range,field_0):Float64;N, tag_0:Utf8]\
\n PromRangeManipulate: req range=[0..100000000], interval=[5000], eval range=[300000], time index=[timestamp], values=[\"field_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Dictionary(Int64, Float64);N, timestamp_range:Dictionary(Int64, Timestamp(ms))]\
\n PromSeriesNormalize: offset=[0], time index=[timestamp], filter NaN: [true] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-299999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]",
);
indie_query_plan_compare(query, expected).await;
@@ -5739,42 +5739,42 @@ mod test {
async fn custom_schema() {
let query = "some_alt_metric{__schema__=\"greptime_private\"}";
let expected = String::from(
"PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Sort: greptime_private.some_alt_metric.tag_0 ASC NULLS FIRST, greptime_private.some_alt_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Filter: greptime_private.some_alt_metric.timestamp >= TimestampMillisecond(-999, None) AND greptime_private.some_alt_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n TableScan: greptime_private.some_alt_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]",
"PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Sort: greptime_private.some_alt_metric.tag_0 ASC NULLS FIRST, greptime_private.some_alt_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Filter: greptime_private.some_alt_metric.timestamp >= TimestampMillisecond(-999, None) AND greptime_private.some_alt_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n TableScan: greptime_private.some_alt_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]",
);
indie_query_plan_compare(query, expected).await;
let query = "some_alt_metric{__database__=\"greptime_private\"}";
let expected = String::from(
"PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Sort: greptime_private.some_alt_metric.tag_0 ASC NULLS FIRST, greptime_private.some_alt_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Filter: greptime_private.some_alt_metric.timestamp >= TimestampMillisecond(-999, None) AND greptime_private.some_alt_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n TableScan: greptime_private.some_alt_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]",
"PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Sort: greptime_private.some_alt_metric.tag_0 ASC NULLS FIRST, greptime_private.some_alt_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Filter: greptime_private.some_alt_metric.timestamp >= TimestampMillisecond(-999, None) AND greptime_private.some_alt_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n TableScan: greptime_private.some_alt_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]",
);
indie_query_plan_compare(query, expected).await;
let query = "some_alt_metric{__schema__=\"greptime_private\"} / some_metric";
let expected = String::from(
"Projection: some_metric.tag_0, some_metric.timestamp, greptime_private.some_alt_metric.field_0 / some_metric.field_0 AS greptime_private.some_alt_metric.field_0 / some_metric.field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), greptime_private.some_alt_metric.field_0 / some_metric.field_0:Float64;N]\
\n Inner Join: greptime_private.some_alt_metric.tag_0 = some_metric.tag_0, greptime_private.some_alt_metric.timestamp = some_metric.timestamp [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n SubqueryAlias: greptime_private.some_alt_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Sort: greptime_private.some_alt_metric.tag_0 ASC NULLS FIRST, greptime_private.some_alt_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Filter: greptime_private.some_alt_metric.timestamp >= TimestampMillisecond(-999, None) AND greptime_private.some_alt_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n TableScan: greptime_private.some_alt_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n SubqueryAlias: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]",
"Projection: some_metric.tag_0, some_metric.timestamp, greptime_private.some_alt_metric.field_0 / some_metric.field_0 AS greptime_private.some_alt_metric.field_0 / some_metric.field_0 [tag_0:Utf8, timestamp:Timestamp(ms), greptime_private.some_alt_metric.field_0 / some_metric.field_0:Float64;N]\
\n Inner Join: greptime_private.some_alt_metric.tag_0 = some_metric.tag_0, greptime_private.some_alt_metric.timestamp = some_metric.timestamp [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n SubqueryAlias: greptime_private.some_alt_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Sort: greptime_private.some_alt_metric.tag_0 ASC NULLS FIRST, greptime_private.some_alt_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Filter: greptime_private.some_alt_metric.timestamp >= TimestampMillisecond(-999, None) AND greptime_private.some_alt_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n TableScan: greptime_private.some_alt_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n SubqueryAlias: some_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n Filter: some_metric.timestamp >= TimestampMillisecond(-999, None) AND some_metric.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]\
\n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]",
);
indie_query_plan_compare(query, expected).await;
@@ -5891,12 +5891,12 @@ mod test {
.unwrap();
assert_eq!(
plan.display_indent_schema().to_string(),
"PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\
\n PromSeriesDivide: tags=[\"tag\"] [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\
\n Sort: metrics.tag ASC NULLS FIRST, metrics.timestamp ASC NULLS FIRST [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\
\n Filter: metrics.tag = Utf8(\"1\") AND metrics.timestamp >= TimestampMillisecond(-999, None) AND metrics.timestamp <= TimestampMillisecond(100000000, None) [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\
\n Projection: metrics.field, metrics.tag, CAST(metrics.timestamp AS Timestamp(ms)) AS timestamp [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\
\n TableScan: metrics [tag:Utf8, timestamp:Timestamp(Nanosecond, None), field:Float64;N]"
"PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [field:Float64;N, tag:Utf8, timestamp:Timestamp(ms)]\
\n PromSeriesDivide: tags=[\"tag\"] [field:Float64;N, tag:Utf8, timestamp:Timestamp(ms)]\
\n Sort: metrics.tag ASC NULLS FIRST, metrics.timestamp ASC NULLS FIRST [field:Float64;N, tag:Utf8, timestamp:Timestamp(ms)]\
\n Filter: metrics.tag = Utf8(\"1\") AND metrics.timestamp >= TimestampMillisecond(-999, None) AND metrics.timestamp <= TimestampMillisecond(100000000, None) [field:Float64;N, tag:Utf8, timestamp:Timestamp(ms)]\
\n Projection: metrics.field, metrics.tag, CAST(metrics.timestamp AS Timestamp(ms)) AS timestamp [field:Float64;N, tag:Utf8, timestamp:Timestamp(ms)]\
\n TableScan: metrics [tag:Utf8, timestamp:Timestamp(ns), field:Float64;N]"
);
let plan = PromPlanner::stmt_to_plan(
DfTableSourceProvider::new(
@@ -5921,15 +5921,15 @@ mod test {
.unwrap();
assert_eq!(
plan.display_indent_schema().to_string(),
"Filter: prom_avg_over_time(timestamp_range,field) IS NOT NULL [timestamp:Timestamp(Millisecond, None), prom_avg_over_time(timestamp_range,field):Float64;N, tag:Utf8]\
\n Projection: metrics.timestamp, prom_avg_over_time(timestamp_range, field) AS prom_avg_over_time(timestamp_range,field), metrics.tag [timestamp:Timestamp(Millisecond, None), prom_avg_over_time(timestamp_range,field):Float64;N, tag:Utf8]\
\n PromRangeManipulate: req range=[0..100000000], interval=[5000], eval range=[5000], time index=[timestamp], values=[\"field\"] [field:Dictionary(Int64, Float64);N, tag:Utf8, timestamp:Timestamp(Millisecond, None), timestamp_range:Dictionary(Int64, Timestamp(Millisecond, None))]\
\n PromSeriesNormalize: offset=[0], time index=[timestamp], filter NaN: [true] [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\
\n PromSeriesDivide: tags=[\"tag\"] [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\
\n Sort: metrics.tag ASC NULLS FIRST, metrics.timestamp ASC NULLS FIRST [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\
\n Filter: metrics.tag = Utf8(\"1\") AND metrics.timestamp >= TimestampMillisecond(-4999, None) AND metrics.timestamp <= TimestampMillisecond(100000000, None) [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\
\n Projection: metrics.field, metrics.tag, CAST(metrics.timestamp AS Timestamp(ms)) AS timestamp [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\
\n TableScan: metrics [tag:Utf8, timestamp:Timestamp(Nanosecond, None), field:Float64;N]"
"Filter: prom_avg_over_time(timestamp_range,field) IS NOT NULL [timestamp:Timestamp(ms), prom_avg_over_time(timestamp_range,field):Float64;N, tag:Utf8]\
\n Projection: metrics.timestamp, prom_avg_over_time(timestamp_range, field) AS prom_avg_over_time(timestamp_range,field), metrics.tag [timestamp:Timestamp(ms), prom_avg_over_time(timestamp_range,field):Float64;N, tag:Utf8]\
\n PromRangeManipulate: req range=[0..100000000], interval=[5000], eval range=[5000], time index=[timestamp], values=[\"field\"] [field:Dictionary(Int64, Float64);N, tag:Utf8, timestamp:Timestamp(ms), timestamp_range:Dictionary(Int64, Timestamp(ms))]\
\n PromSeriesNormalize: offset=[0], time index=[timestamp], filter NaN: [true] [field:Float64;N, tag:Utf8, timestamp:Timestamp(ms)]\
\n PromSeriesDivide: tags=[\"tag\"] [field:Float64;N, tag:Utf8, timestamp:Timestamp(ms)]\
\n Sort: metrics.tag ASC NULLS FIRST, metrics.timestamp ASC NULLS FIRST [field:Float64;N, tag:Utf8, timestamp:Timestamp(ms)]\
\n Filter: metrics.tag = Utf8(\"1\") AND metrics.timestamp >= TimestampMillisecond(-4999, None) AND metrics.timestamp <= TimestampMillisecond(100000000, None) [field:Float64;N, tag:Utf8, timestamp:Timestamp(ms)]\
\n Projection: metrics.field, metrics.tag, CAST(metrics.timestamp AS Timestamp(ms)) AS timestamp [field:Float64;N, tag:Utf8, timestamp:Timestamp(ms)]\
\n TableScan: metrics [tag:Utf8, timestamp:Timestamp(ns), field:Float64;N]"
);
}
@@ -5986,13 +5986,13 @@ mod test {
.unwrap();
let expected = r#"
Filter: up.field_0 IS NOT NULL [timestamp:Timestamp(Millisecond, None), field_0:Float64;N, foo:Utf8;N, tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8]
Projection: up.timestamp, up.field_0, concat_ws(Utf8(","), up.tag_1, up.tag_2, up.tag_3) AS foo, up.tag_0, up.tag_1, up.tag_2, up.tag_3 [timestamp:Timestamp(Millisecond, None), field_0:Float64;N, foo:Utf8;N, tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8]
PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]
PromSeriesDivide: tags=["tag_0", "tag_1", "tag_2", "tag_3"] [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]
Sort: up.tag_0 ASC NULLS FIRST, up.tag_1 ASC NULLS FIRST, up.tag_2 ASC NULLS FIRST, up.tag_3 ASC NULLS FIRST, up.timestamp ASC NULLS FIRST [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]
Filter: up.tag_0 = Utf8("api-server") AND up.timestamp >= TimestampMillisecond(-999, None) AND up.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]
TableScan: up [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]"#;
Filter: up.field_0 IS NOT NULL [timestamp:Timestamp(ms), field_0:Float64;N, foo:Utf8;N, tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8]
Projection: up.timestamp, up.field_0, concat_ws(Utf8(","), up.tag_1, up.tag_2, up.tag_3) AS foo, up.tag_0, up.tag_1, up.tag_2, up.tag_3 [timestamp:Timestamp(ms), field_0:Float64;N, foo:Utf8;N, tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8]
PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]
PromSeriesDivide: tags=["tag_0", "tag_1", "tag_2", "tag_3"] [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]
Sort: up.tag_0 ASC NULLS FIRST, up.tag_1 ASC NULLS FIRST, up.tag_2 ASC NULLS FIRST, up.tag_3 ASC NULLS FIRST, up.timestamp ASC NULLS FIRST [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]
Filter: up.tag_0 = Utf8("api-server") AND up.timestamp >= TimestampMillisecond(-999, None) AND up.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]
TableScan: up [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]"#;
let ret = plan.display_indent_schema().to_string();
assert_eq!(format!("\n{ret}"), expected, "\n{}", ret);
@@ -6023,13 +6023,13 @@ Filter: up.field_0 IS NOT NULL [timestamp:Timestamp(Millisecond, None), field_0:
.unwrap();
let expected = r#"
Filter: up.field_0 IS NOT NULL [timestamp:Timestamp(Millisecond, None), field_0:Float64;N, foo:Utf8;N, tag_0:Utf8]
Projection: up.timestamp, up.field_0, regexp_replace(up.tag_0, Utf8("^(?s:(.*):.*)$"), Utf8("$1")) AS foo, up.tag_0 [timestamp:Timestamp(Millisecond, None), field_0:Float64;N, foo:Utf8;N, tag_0:Utf8]
PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]
PromSeriesDivide: tags=["tag_0"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]
Sort: up.tag_0 ASC NULLS FIRST, up.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]
Filter: up.tag_0 = Utf8("a:c") AND up.timestamp >= TimestampMillisecond(-999, None) AND up.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]
TableScan: up [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]"#;
Filter: up.field_0 IS NOT NULL [timestamp:Timestamp(ms), field_0:Float64;N, foo:Utf8;N, tag_0:Utf8]
Projection: up.timestamp, up.field_0, regexp_replace(up.tag_0, Utf8("^(?s:(.*):.*)$"), Utf8("$1")) AS foo, up.tag_0 [timestamp:Timestamp(ms), field_0:Float64;N, foo:Utf8;N, tag_0:Utf8]
PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]
PromSeriesDivide: tags=["tag_0"] [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]
Sort: up.tag_0 ASC NULLS FIRST, up.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]
Filter: up.tag_0 = Utf8("a:c") AND up.timestamp >= TimestampMillisecond(-999, None) AND up.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]
TableScan: up [tag_0:Utf8, timestamp:Timestamp(ms), field_0:Float64;N]"#;
let ret = plan.display_indent_schema().to_string();
assert_eq!(format!("\n{ret}"), expected, "\n{}", ret);
@@ -6064,13 +6064,13 @@ Filter: up.field_0 IS NOT NULL [timestamp:Timestamp(Millisecond, None), field_0:
PromPlanner::stmt_to_plan(table_provider, &eval_stmt, &build_query_engine_state())
.await
.unwrap();
let expected = "Sort: prometheus_tsdb_head_series.timestamp ASC NULLS LAST [timestamp:Timestamp(Millisecond, None), sum(prometheus_tsdb_head_series.field_0):Float64;N, sum(prometheus_tsdb_head_series.field_1):Float64;N, sum(prometheus_tsdb_head_series.field_2):Float64;N]\
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.timestamp]], aggr=[[sum(prometheus_tsdb_head_series.field_0), sum(prometheus_tsdb_head_series.field_1), sum(prometheus_tsdb_head_series.field_2)]] [timestamp:Timestamp(Millisecond, None), sum(prometheus_tsdb_head_series.field_0):Float64;N, sum(prometheus_tsdb_head_series.field_1):Float64;N, sum(prometheus_tsdb_head_series.field_2):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\", \"tag_1\", \"tag_2\"] [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N]\
\n Sort: prometheus_tsdb_head_series.tag_0 ASC NULLS FIRST, prometheus_tsdb_head_series.tag_1 ASC NULLS FIRST, prometheus_tsdb_head_series.tag_2 ASC NULLS FIRST, prometheus_tsdb_head_series.timestamp ASC NULLS FIRST [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N]\
\n Filter: prometheus_tsdb_head_series.tag_1 ~ Utf8(\"^(?:(10.0.160.237:8080|10.0.160.237:9090))$\") AND prometheus_tsdb_head_series.timestamp >= TimestampMillisecond(-999, None) AND prometheus_tsdb_head_series.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N]\
\n TableScan: prometheus_tsdb_head_series [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N]";
let expected = "Sort: prometheus_tsdb_head_series.timestamp ASC NULLS LAST [timestamp:Timestamp(ms), sum(prometheus_tsdb_head_series.field_0):Float64;N, sum(prometheus_tsdb_head_series.field_1):Float64;N, sum(prometheus_tsdb_head_series.field_2):Float64;N]\
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.timestamp]], aggr=[[sum(prometheus_tsdb_head_series.field_0), sum(prometheus_tsdb_head_series.field_1), sum(prometheus_tsdb_head_series.field_2)]] [timestamp:Timestamp(ms), sum(prometheus_tsdb_head_series.field_0):Float64;N, sum(prometheus_tsdb_head_series.field_1):Float64;N, sum(prometheus_tsdb_head_series.field_2):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N]\
\n PromSeriesDivide: tags=[\"tag_0\", \"tag_1\", \"tag_2\"] [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N]\
\n Sort: prometheus_tsdb_head_series.tag_0 ASC NULLS FIRST, prometheus_tsdb_head_series.tag_1 ASC NULLS FIRST, prometheus_tsdb_head_series.tag_2 ASC NULLS FIRST, prometheus_tsdb_head_series.timestamp ASC NULLS FIRST [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N]\
\n Filter: prometheus_tsdb_head_series.tag_1 ~ Utf8(\"^(?:(10.0.160.237:8080|10.0.160.237:9090))$\") AND prometheus_tsdb_head_series.timestamp >= TimestampMillisecond(-999, None) AND prometheus_tsdb_head_series.timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N]\
\n TableScan: prometheus_tsdb_head_series [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N]";
assert_eq!(plan.display_indent_schema().to_string(), expected);
}
@@ -6108,17 +6108,17 @@ Filter: up.field_0 IS NOT NULL [timestamp:Timestamp(Millisecond, None), field_0:
PromPlanner::stmt_to_plan(table_provider, &eval_stmt, &build_query_engine_state())
.await
.unwrap();
let expected = "Projection: sum(prometheus_tsdb_head_series.greptime_value), prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp [sum(prometheus_tsdb_head_series.greptime_value):Float64;N, ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None)]\
\n Sort: prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST, row_number() PARTITION BY [prometheus_tsdb_head_series.greptime_timestamp] ORDER BY [sum(prometheus_tsdb_head_series.greptime_value) DESC NULLS FIRST, prometheus_tsdb_head_series.ip DESC NULLS FIRST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ASC NULLS LAST [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(prometheus_tsdb_head_series.greptime_value):Float64;N, row_number() PARTITION BY [prometheus_tsdb_head_series.greptime_timestamp] ORDER BY [sum(prometheus_tsdb_head_series.greptime_value) DESC NULLS FIRST, prometheus_tsdb_head_series.ip DESC NULLS FIRST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW:UInt64]\
\n Filter: row_number() PARTITION BY [prometheus_tsdb_head_series.greptime_timestamp] ORDER BY [sum(prometheus_tsdb_head_series.greptime_value) DESC NULLS FIRST, prometheus_tsdb_head_series.ip DESC NULLS FIRST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW <= Float64(10) [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(prometheus_tsdb_head_series.greptime_value):Float64;N, row_number() PARTITION BY [prometheus_tsdb_head_series.greptime_timestamp] ORDER BY [sum(prometheus_tsdb_head_series.greptime_value) DESC NULLS FIRST, prometheus_tsdb_head_series.ip DESC NULLS FIRST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW:UInt64]\
\n WindowAggr: windowExpr=[[row_number() PARTITION BY [prometheus_tsdb_head_series.greptime_timestamp] ORDER BY [sum(prometheus_tsdb_head_series.greptime_value) DESC NULLS FIRST, prometheus_tsdb_head_series.ip DESC NULLS FIRST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW]] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(prometheus_tsdb_head_series.greptime_value):Float64;N, row_number() PARTITION BY [prometheus_tsdb_head_series.greptime_timestamp] ORDER BY [sum(prometheus_tsdb_head_series.greptime_value) DESC NULLS FIRST, prometheus_tsdb_head_series.ip DESC NULLS FIRST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW:UInt64]\
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS LAST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(prometheus_tsdb_head_series.greptime_value):Float64;N]\
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp]], aggr=[[sum(prometheus_tsdb_head_series.greptime_value)]] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(prometheus_tsdb_head_series.greptime_value):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\
\n PromSeriesDivide: tags=[\"ip\"] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS FIRST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS FIRST [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\
\n Filter: prometheus_tsdb_head_series.ip ~ Utf8(\"^(?:(10.0.160.237:8080|10.0.160.237:9090))$\") AND prometheus_tsdb_head_series.greptime_timestamp >= TimestampMillisecond(-999, None) AND prometheus_tsdb_head_series.greptime_timestamp <= TimestampMillisecond(100000000, None) [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\
\n TableScan: prometheus_tsdb_head_series [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]";
let expected = "Projection: sum(prometheus_tsdb_head_series.greptime_value), prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp [sum(prometheus_tsdb_head_series.greptime_value):Float64;N, ip:Utf8, greptime_timestamp:Timestamp(ms)]\
\n Sort: prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST, row_number() PARTITION BY [prometheus_tsdb_head_series.greptime_timestamp] ORDER BY [sum(prometheus_tsdb_head_series.greptime_value) DESC NULLS FIRST, prometheus_tsdb_head_series.ip DESC NULLS FIRST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ASC NULLS LAST [ip:Utf8, greptime_timestamp:Timestamp(ms), sum(prometheus_tsdb_head_series.greptime_value):Float64;N, row_number() PARTITION BY [prometheus_tsdb_head_series.greptime_timestamp] ORDER BY [sum(prometheus_tsdb_head_series.greptime_value) DESC NULLS FIRST, prometheus_tsdb_head_series.ip DESC NULLS FIRST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW:UInt64]\
\n Filter: row_number() PARTITION BY [prometheus_tsdb_head_series.greptime_timestamp] ORDER BY [sum(prometheus_tsdb_head_series.greptime_value) DESC NULLS FIRST, prometheus_tsdb_head_series.ip DESC NULLS FIRST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW <= Float64(10) [ip:Utf8, greptime_timestamp:Timestamp(ms), sum(prometheus_tsdb_head_series.greptime_value):Float64;N, row_number() PARTITION BY [prometheus_tsdb_head_series.greptime_timestamp] ORDER BY [sum(prometheus_tsdb_head_series.greptime_value) DESC NULLS FIRST, prometheus_tsdb_head_series.ip DESC NULLS FIRST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW:UInt64]\
\n WindowAggr: windowExpr=[[row_number() PARTITION BY [prometheus_tsdb_head_series.greptime_timestamp] ORDER BY [sum(prometheus_tsdb_head_series.greptime_value) DESC NULLS FIRST, prometheus_tsdb_head_series.ip DESC NULLS FIRST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW]] [ip:Utf8, greptime_timestamp:Timestamp(ms), sum(prometheus_tsdb_head_series.greptime_value):Float64;N, row_number() PARTITION BY [prometheus_tsdb_head_series.greptime_timestamp] ORDER BY [sum(prometheus_tsdb_head_series.greptime_value) DESC NULLS FIRST, prometheus_tsdb_head_series.ip DESC NULLS FIRST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW:UInt64]\
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS LAST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST [ip:Utf8, greptime_timestamp:Timestamp(ms), sum(prometheus_tsdb_head_series.greptime_value):Float64;N]\
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp]], aggr=[[sum(prometheus_tsdb_head_series.greptime_value)]] [ip:Utf8, greptime_timestamp:Timestamp(ms), sum(prometheus_tsdb_head_series.greptime_value):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp] [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]\
\n PromSeriesDivide: tags=[\"ip\"] [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]\
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS FIRST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS FIRST [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]\
\n Filter: prometheus_tsdb_head_series.ip ~ Utf8(\"^(?:(10.0.160.237:8080|10.0.160.237:9090))$\") AND prometheus_tsdb_head_series.greptime_timestamp >= TimestampMillisecond(-999, None) AND prometheus_tsdb_head_series.greptime_timestamp <= TimestampMillisecond(100000000, None) [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]\
\n TableScan: prometheus_tsdb_head_series [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]";
assert_eq!(plan.display_indent_schema().to_string(), expected);
}
@@ -6157,15 +6157,15 @@ Filter: up.field_0 IS NOT NULL [timestamp:Timestamp(Millisecond, None), field_0:
PromPlanner::stmt_to_plan(table_provider, &eval_stmt, &build_query_engine_state())
.await
.unwrap();
let expected = "Projection: count(prometheus_tsdb_head_series.greptime_value), prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp, series [count(prometheus_tsdb_head_series.greptime_value):Int64, ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), series:Float64;N]\
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS LAST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST, prometheus_tsdb_head_series.greptime_value ASC NULLS LAST [count(prometheus_tsdb_head_series.greptime_value):Int64, ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), series:Float64;N, greptime_value:Float64;N]\
\n Projection: count(prometheus_tsdb_head_series.greptime_value), prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp, prometheus_tsdb_head_series.greptime_value AS series, prometheus_tsdb_head_series.greptime_value [count(prometheus_tsdb_head_series.greptime_value):Int64, ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), series:Float64;N, greptime_value:Float64;N]\
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp, prometheus_tsdb_head_series.greptime_value]], aggr=[[count(prometheus_tsdb_head_series.greptime_value)]] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N, count(prometheus_tsdb_head_series.greptime_value):Int64]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\
\n PromSeriesDivide: tags=[\"ip\"] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS FIRST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS FIRST [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\
\n Filter: prometheus_tsdb_head_series.ip ~ Utf8(\"^(?:(10.0.160.237:8080|10.0.160.237:9090))$\") AND prometheus_tsdb_head_series.greptime_timestamp >= TimestampMillisecond(-999, None) AND prometheus_tsdb_head_series.greptime_timestamp <= TimestampMillisecond(100000000, None) [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\
\n TableScan: prometheus_tsdb_head_series [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]";
let expected = "Projection: count(prometheus_tsdb_head_series.greptime_value), prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp, series [count(prometheus_tsdb_head_series.greptime_value):Int64, ip:Utf8, greptime_timestamp:Timestamp(ms), series:Float64;N]\
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS LAST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST, prometheus_tsdb_head_series.greptime_value ASC NULLS LAST [count(prometheus_tsdb_head_series.greptime_value):Int64, ip:Utf8, greptime_timestamp:Timestamp(ms), series:Float64;N, greptime_value:Float64;N]\
\n Projection: count(prometheus_tsdb_head_series.greptime_value), prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp, prometheus_tsdb_head_series.greptime_value AS series, prometheus_tsdb_head_series.greptime_value [count(prometheus_tsdb_head_series.greptime_value):Int64, ip:Utf8, greptime_timestamp:Timestamp(ms), series:Float64;N, greptime_value:Float64;N]\
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp, prometheus_tsdb_head_series.greptime_value]], aggr=[[count(prometheus_tsdb_head_series.greptime_value)]] [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N, count(prometheus_tsdb_head_series.greptime_value):Int64]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp] [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]\
\n PromSeriesDivide: tags=[\"ip\"] [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]\
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS FIRST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS FIRST [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]\
\n Filter: prometheus_tsdb_head_series.ip ~ Utf8(\"^(?:(10.0.160.237:8080|10.0.160.237:9090))$\") AND prometheus_tsdb_head_series.greptime_timestamp >= TimestampMillisecond(-999, None) AND prometheus_tsdb_head_series.greptime_timestamp <= TimestampMillisecond(100000000, None) [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]\
\n TableScan: prometheus_tsdb_head_series [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]";

assert_eq!(plan.display_indent_schema().to_string(), expected);
}
@@ -6206,16 +6206,16 @@ Filter: up.field_0 IS NOT NULL [timestamp:Timestamp(Millisecond, None), field_0:
.await
.unwrap();
let expected = r#"
Projection: count(prometheus_tsdb_head_series.greptime_value) AS my_series, prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp [my_series:Int64, ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None)]
Projection: count(prometheus_tsdb_head_series.greptime_value), prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp, series [count(prometheus_tsdb_head_series.greptime_value):Int64, ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), series:Float64;N]
Sort: prometheus_tsdb_head_series.ip ASC NULLS LAST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST, prometheus_tsdb_head_series.greptime_value ASC NULLS LAST [count(prometheus_tsdb_head_series.greptime_value):Int64, ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), series:Float64;N, greptime_value:Float64;N]
Projection: count(prometheus_tsdb_head_series.greptime_value), prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp, prometheus_tsdb_head_series.greptime_value AS series, prometheus_tsdb_head_series.greptime_value [count(prometheus_tsdb_head_series.greptime_value):Int64, ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), series:Float64;N, greptime_value:Float64;N]
Aggregate: groupBy=[[prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp, prometheus_tsdb_head_series.greptime_value]], aggr=[[count(prometheus_tsdb_head_series.greptime_value)]] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N, count(prometheus_tsdb_head_series.greptime_value):Int64]
PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]
PromSeriesDivide: tags=["ip"] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]
Sort: prometheus_tsdb_head_series.ip ASC NULLS FIRST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS FIRST [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]
Filter: prometheus_tsdb_head_series.ip ~ Utf8("^(?:(10.0.160.237:8080|10.0.160.237:9090))$") AND prometheus_tsdb_head_series.greptime_timestamp >= TimestampMillisecond(-999, None) AND prometheus_tsdb_head_series.greptime_timestamp <= TimestampMillisecond(100000000, None) [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]
TableScan: prometheus_tsdb_head_series [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]"#;
Projection: count(prometheus_tsdb_head_series.greptime_value) AS my_series, prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp [my_series:Int64, ip:Utf8, greptime_timestamp:Timestamp(ms)]
Projection: count(prometheus_tsdb_head_series.greptime_value), prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp, series [count(prometheus_tsdb_head_series.greptime_value):Int64, ip:Utf8, greptime_timestamp:Timestamp(ms), series:Float64;N]
Sort: prometheus_tsdb_head_series.ip ASC NULLS LAST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST, prometheus_tsdb_head_series.greptime_value ASC NULLS LAST [count(prometheus_tsdb_head_series.greptime_value):Int64, ip:Utf8, greptime_timestamp:Timestamp(ms), series:Float64;N, greptime_value:Float64;N]
Projection: count(prometheus_tsdb_head_series.greptime_value), prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp, prometheus_tsdb_head_series.greptime_value AS series, prometheus_tsdb_head_series.greptime_value [count(prometheus_tsdb_head_series.greptime_value):Int64, ip:Utf8, greptime_timestamp:Timestamp(ms), series:Float64;N, greptime_value:Float64;N]
Aggregate: groupBy=[[prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp, prometheus_tsdb_head_series.greptime_value]], aggr=[[count(prometheus_tsdb_head_series.greptime_value)]] [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N, count(prometheus_tsdb_head_series.greptime_value):Int64]
PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp] [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]
PromSeriesDivide: tags=["ip"] [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]
Sort: prometheus_tsdb_head_series.ip ASC NULLS FIRST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS FIRST [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]
Filter: prometheus_tsdb_head_series.ip ~ Utf8("^(?:(10.0.160.237:8080|10.0.160.237:9090))$") AND prometheus_tsdb_head_series.greptime_timestamp >= TimestampMillisecond(-999, None) AND prometheus_tsdb_head_series.greptime_timestamp <= TimestampMillisecond(100000000, None) [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]
TableScan: prometheus_tsdb_head_series [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]"#;
assert_eq!(format!("\n{}", plan.display_indent_schema()), expected);
}
@@ -6253,15 +6253,15 @@ Projection: count(prometheus_tsdb_head_series.greptime_value) AS my_series, prom
PromPlanner::stmt_to_plan(table_provider, &eval_stmt, &build_query_engine_state())
.await
.unwrap();
let expected = "Sort: prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST [greptime_timestamp:Timestamp(Millisecond, None), quantile(Float64(0.3),sum(prometheus_tsdb_head_series.greptime_value)):Float64;N]\
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.greptime_timestamp]], aggr=[[quantile(Float64(0.3), sum(prometheus_tsdb_head_series.greptime_value))]] [greptime_timestamp:Timestamp(Millisecond, None), quantile(Float64(0.3),sum(prometheus_tsdb_head_series.greptime_value)):Float64;N]\
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS LAST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(prometheus_tsdb_head_series.greptime_value):Float64;N]\
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp]], aggr=[[sum(prometheus_tsdb_head_series.greptime_value)]] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(prometheus_tsdb_head_series.greptime_value):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\
\n PromSeriesDivide: tags=[\"ip\"] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS FIRST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS FIRST [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\
\n Filter: prometheus_tsdb_head_series.ip ~ Utf8(\"^(?:(10.0.160.237:8080|10.0.160.237:9090))$\") AND prometheus_tsdb_head_series.greptime_timestamp >= TimestampMillisecond(-999, None) AND prometheus_tsdb_head_series.greptime_timestamp <= TimestampMillisecond(100000000, None) [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\
\n TableScan: prometheus_tsdb_head_series [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]";
let expected = "Sort: prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST [greptime_timestamp:Timestamp(ms), quantile(Float64(0.3),sum(prometheus_tsdb_head_series.greptime_value)):Float64;N]\
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.greptime_timestamp]], aggr=[[quantile(Float64(0.3), sum(prometheus_tsdb_head_series.greptime_value))]] [greptime_timestamp:Timestamp(ms), quantile(Float64(0.3),sum(prometheus_tsdb_head_series.greptime_value)):Float64;N]\
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS LAST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST [ip:Utf8, greptime_timestamp:Timestamp(ms), sum(prometheus_tsdb_head_series.greptime_value):Float64;N]\
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp]], aggr=[[sum(prometheus_tsdb_head_series.greptime_value)]] [ip:Utf8, greptime_timestamp:Timestamp(ms), sum(prometheus_tsdb_head_series.greptime_value):Float64;N]\
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp] [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]\
\n PromSeriesDivide: tags=[\"ip\"] [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]\
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS FIRST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS FIRST [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]\
\n Filter: prometheus_tsdb_head_series.ip ~ Utf8(\"^(?:(10.0.160.237:8080|10.0.160.237:9090))$\") AND prometheus_tsdb_head_series.greptime_timestamp >= TimestampMillisecond(-999, None) AND prometheus_tsdb_head_series.greptime_timestamp <= TimestampMillisecond(100000000, None) [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]\
\n TableScan: prometheus_tsdb_head_series [ip:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]";

assert_eq!(plan.display_indent_schema().to_string(), expected);
}
@@ -6291,22 +6291,22 @@ Projection: count(prometheus_tsdb_head_series.greptime_value) AS my_series, prom
PromPlanner::stmt_to_plan(table_provider, &eval_stmt, &build_query_engine_state())
.await
.unwrap();
let expected = r#"UnionDistinctOn: on col=[["job"]], ts_col=[greptime_timestamp] [greptime_timestamp:Timestamp(Millisecond, None), job:Utf8, sum(metric_exists.greptime_value):Float64;N]
SubqueryAlias: metric_exists [greptime_timestamp:Timestamp(Millisecond, None), job:Utf8, sum(metric_exists.greptime_value):Float64;N]
Projection: metric_exists.greptime_timestamp, metric_exists.job, sum(metric_exists.greptime_value) [greptime_timestamp:Timestamp(Millisecond, None), job:Utf8, sum(metric_exists.greptime_value):Float64;N]
Sort: metric_exists.job ASC NULLS LAST, metric_exists.greptime_timestamp ASC NULLS LAST [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(metric_exists.greptime_value):Float64;N]
Aggregate: groupBy=[[metric_exists.job, metric_exists.greptime_timestamp]], aggr=[[sum(metric_exists.greptime_value)]] [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(metric_exists.greptime_value):Float64;N]
PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp] [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]
PromSeriesDivide: tags=["job"] [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]
Sort: metric_exists.job ASC NULLS FIRST, metric_exists.greptime_timestamp ASC NULLS FIRST [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]
Filter: metric_exists.greptime_timestamp >= TimestampMillisecond(-999, None) AND metric_exists.greptime_timestamp <= TimestampMillisecond(100000000, None) [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]
TableScan: metric_exists [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]
SubqueryAlias: [greptime_timestamp:Timestamp(Millisecond, None), job:Utf8;N, sum(.value):Float64;N]
Projection: .time AS greptime_timestamp, Utf8(NULL) AS job, sum(.value) [greptime_timestamp:Timestamp(Millisecond, None), job:Utf8;N, sum(.value):Float64;N]
Sort: .time ASC NULLS LAST [time:Timestamp(Millisecond, None), sum(.value):Float64;N]
Aggregate: groupBy=[[.time]], aggr=[[sum(.value)]] [time:Timestamp(Millisecond, None), sum(.value):Float64;N]
EmptyMetric: range=[0..-1], interval=[5000] [time:Timestamp(Millisecond, None), value:Float64;N]
TableScan: dummy [time:Timestamp(Millisecond, None), value:Float64;N]"#;
let expected = r#"UnionDistinctOn: on col=[["job"]], ts_col=[greptime_timestamp] [greptime_timestamp:Timestamp(ms), job:Utf8, sum(metric_exists.greptime_value):Float64;N]
SubqueryAlias: metric_exists [greptime_timestamp:Timestamp(ms), job:Utf8, sum(metric_exists.greptime_value):Float64;N]
Projection: metric_exists.greptime_timestamp, metric_exists.job, sum(metric_exists.greptime_value) [greptime_timestamp:Timestamp(ms), job:Utf8, sum(metric_exists.greptime_value):Float64;N]
Sort: metric_exists.job ASC NULLS LAST, metric_exists.greptime_timestamp ASC NULLS LAST [job:Utf8, greptime_timestamp:Timestamp(ms), sum(metric_exists.greptime_value):Float64;N]
Aggregate: groupBy=[[metric_exists.job, metric_exists.greptime_timestamp]], aggr=[[sum(metric_exists.greptime_value)]] [job:Utf8, greptime_timestamp:Timestamp(ms), sum(metric_exists.greptime_value):Float64;N]
PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp] [job:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]
PromSeriesDivide: tags=["job"] [job:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]
Sort: metric_exists.job ASC NULLS FIRST, metric_exists.greptime_timestamp ASC NULLS FIRST [job:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]
Filter: metric_exists.greptime_timestamp >= TimestampMillisecond(-999, None) AND metric_exists.greptime_timestamp <= TimestampMillisecond(100000000, None) [job:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]
TableScan: metric_exists [job:Utf8, greptime_timestamp:Timestamp(ms), greptime_value:Float64;N]
SubqueryAlias: [greptime_timestamp:Timestamp(ms), job:Utf8;N, sum(.value):Float64;N]
Projection: .time AS greptime_timestamp, Utf8(NULL) AS job, sum(.value) [greptime_timestamp:Timestamp(ms), job:Utf8;N, sum(.value):Float64;N]
Sort: .time ASC NULLS LAST [time:Timestamp(ms), sum(.value):Float64;N]
Aggregate: groupBy=[[.time]], aggr=[[sum(.value)]] [time:Timestamp(ms), sum(.value):Float64;N]
EmptyMetric: range=[0..-1], interval=[5000] [time:Timestamp(ms), value:Float64;N]
TableScan: dummy [time:Timestamp(ms), value:Float64;N]"#;

assert_eq!(plan.display_indent_schema().to_string(), expected);
}

@@ -414,7 +414,7 @@ impl RangeSelect {
.iter()
.map(|i| {
let f = schema_before_project.qualified_field(*i);
(f.0.cloned(), Arc::new(f.1.clone()))
(f.0.cloned(), f.1.clone())
})
.collect();
Arc::new(DFSchema::new_with_metadata(
@@ -688,12 +688,12 @@ impl RangeSelect {
schema_before_project.clone()
};
let by = self.create_physical_expr_list(false, &self.by, input_dfschema, session_state)?;
let cache = PlanProperties::new(
let cache = Arc::new(PlanProperties::new(
EquivalenceProperties::new(schema.clone()),
Partitioning::UnknownPartitioning(1),
EmissionType::Incremental,
Boundedness::Bounded,
);
));
Ok(Arc::new(RangeSelectExec {
input: exec_input,
range_exec,
@@ -760,7 +760,7 @@ pub struct RangeSelectExec {
metric: ExecutionPlanMetricsSet,
schema_project: Option<Vec<usize>>,
schema_before_project: SchemaRef,
cache: PlanProperties,
cache: Arc<PlanProperties>,
}

impl DisplayAs for RangeSelectExec {
@@ -801,7 +801,7 @@ impl ExecutionPlan for RangeSelectExec {
vec![Distribution::SinglePartition]
}

fn properties(&self) -> &PlanProperties {
fn properties(&self) -> &Arc<PlanProperties> {
&self.cache
}

@@ -1340,12 +1340,12 @@ mod test {
Field::new(TIME_INDEX_COLUMN, TimestampMillisecondType::DATA_TYPE, true),
Field::new("host", DataType::Utf8, true),
]));
let cache = PlanProperties::new(
let cache = Arc::new(PlanProperties::new(
EquivalenceProperties::new(schema.clone()),
Partitioning::UnknownPartitioning(1),
EmissionType::Incremental,
Boundedness::Bounded,
);
));
let input_schema = memory_exec.schema().clone();
let range_select_exec = Arc::new(RangeSelectExec {
input: memory_exec,

@@ -19,7 +19,6 @@ use std::time::Duration;
use arrow_schema::DataType;
use async_recursion::async_recursion;
use catalog::table_source::DfTableSourceProvider;
use chrono::Utc;
use common_time::interval::{MS_PER_DAY, NANOS_PER_MILLI};
use common_time::timestamp::TimeUnit;
use common_time::{IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp, Timezone};
@@ -28,7 +27,6 @@ use datafusion::prelude::Column;
use datafusion::scalar::ScalarValue;
use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRecursion, TreeNodeRewriter};
use datafusion_common::{DFSchema, DataFusionError, Result as DFResult};
use datafusion_expr::execution_props::ExecutionProps;
use datafusion_expr::expr::WildcardOptions;
use datafusion_expr::simplify::SimplifyContext;
use datafusion_expr::{
@@ -146,8 +144,7 @@ fn evaluate_expr_to_millisecond(args: &[Expr], i: usize, interval_only: bool) ->
if interval_only && !interval_only_in_expr(expr) {
return Err(dispose_parse_error(Some(expr)));
}
let execution_props = ExecutionProps::new().with_query_execution_start_time(Utc::now());
let info = SimplifyContext::new(&execution_props).with_schema(Arc::new(DFSchema::empty()));
let info = SimplifyContext::default().with_current_time();
let simplify_expr = ExprSimplifier::new(info).simplify(expr.clone())?;
match simplify_expr {
Expr::Literal(ScalarValue::TimestampNanosecond(ts_nanos, _), _)
@@ -702,8 +699,8 @@ mod test {
async fn range_no_project() {
let query = r#"SELECT timestamp, tag_0, tag_1, avg(field_0 + field_1) RANGE '5m' FROM test ALIGN '1h' by (tag_0,tag_1);"#;
let expected = String::from(
"RangeSelect: range_exprs=[avg(test.field_0 + test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, avg(test.field_0 + test.field_1) RANGE 5m:Float64;N]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
"RangeSelect: range_exprs=[avg(test.field_0 + test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [timestamp:Timestamp(ms), tag_0:Utf8, tag_1:Utf8, avg(test.field_0 + test.field_1) RANGE 5m:Float64;N]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
);
query_plan_compare(query, expected).await;
}
@@ -713,8 +710,8 @@ mod test {
let query = r#"SELECT (avg(field_0 + field_1)/4) RANGE '5m' FROM test ALIGN '1h' by (tag_0,tag_1);"#;
let expected = String::from(
"Projection: avg(test.field_0 + test.field_1) RANGE 5m / Int64(4) [avg(test.field_0 + test.field_1) RANGE 5m / Int64(4):Float64;N]\
\n RangeSelect: range_exprs=[avg(test.field_0 + test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0 + test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
\n RangeSelect: range_exprs=[avg(test.field_0 + test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0 + test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(ms), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
);
query_plan_compare(query, expected).await;
}
@@ -725,8 +722,8 @@ mod test {
r#"SELECT (covar(field_0 + field_1, field_1)/4) RANGE '5m' FROM test ALIGN '1h';"#;
let expected = String::from(
"Projection: covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m / Int64(4) [covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m / Int64(4):Float64;N]\
\n RangeSelect: range_exprs=[covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1, test.tag_2, test.tag_3, test.tag_4], time_index=timestamp [covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
\n RangeSelect: range_exprs=[covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1, test.tag_2, test.tag_3, test.tag_4], time_index=timestamp [covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(ms), tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
);
query_plan_compare(query, expected).await;
}
@@ -736,8 +733,8 @@ mod test {
let query = r#"SELECT ((avg(field_0)+sum(field_1))/4) RANGE '5m' FROM test ALIGN '1h' by (tag_0,tag_1) FILL NULL;"#;
let expected = String::from(
"Projection: (avg(test.field_0) RANGE 5m FILL NULL + sum(test.field_1) RANGE 5m FILL NULL) / Int64(4) [avg(test.field_0) RANGE 5m FILL NULL + sum(test.field_1) RANGE 5m FILL NULL / Int64(4):Float64;N]\
\n RangeSelect: range_exprs=[avg(test.field_0) RANGE 5m FILL NULL, sum(test.field_1) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0) RANGE 5m FILL NULL:Float64;N, sum(test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
\n RangeSelect: range_exprs=[avg(test.field_0) RANGE 5m FILL NULL, sum(test.field_1) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0) RANGE 5m FILL NULL:Float64;N, sum(test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(ms), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
);
query_plan_compare(query, expected).await;
}
@@ -749,8 +746,8 @@ mod test {
"Projection: foo + Int64(1) [foo + Int64(1):Float64;N]\
\n Filter: foo > Int64(1) [foo:Float64;N]\
\n Projection: (avg(test.field_0) RANGE 5m FILL NULL + sum(test.field_1) RANGE 5m FILL NULL) / Int64(4) AS foo [foo:Float64;N]\
\n RangeSelect: range_exprs=[avg(test.field_0) RANGE 5m FILL NULL, sum(test.field_1) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0) RANGE 5m FILL NULL:Float64;N, sum(test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
\n RangeSelect: range_exprs=[avg(test.field_0) RANGE 5m FILL NULL, sum(test.field_1) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0) RANGE 5m FILL NULL:Float64;N, sum(test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(ms), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
);
query_plan_compare(query, expected).await;
}
@@ -760,10 +757,10 @@ mod test {
let query = r#"SELECT ((avg(a)+sum(b))/4) RANGE '5m' FROM (SELECT field_0 as a, field_1 as b, tag_0 as c, tag_1 as d, timestamp from test where field_0 > 1.0) ALIGN '1h' by (c, d) FILL NULL;"#;
let expected = String::from(
"Projection: (avg(a) RANGE 5m FILL NULL + sum(b) RANGE 5m FILL NULL) / Int64(4) [avg(a) RANGE 5m FILL NULL + sum(b) RANGE 5m FILL NULL / Int64(4):Float64;N]\
\n RangeSelect: range_exprs=[avg(a) RANGE 5m FILL NULL, sum(b) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[c, d], time_index=timestamp [avg(a) RANGE 5m FILL NULL:Float64;N, sum(b) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), c:Utf8, d:Utf8]\
\n Projection: test.field_0 AS a, test.field_1 AS b, test.tag_0 AS c, test.tag_1 AS d, test.timestamp [a:Float64;N, b:Float64;N, c:Utf8, d:Utf8, timestamp:Timestamp(Millisecond, None)]\
\n Filter: test.field_0 > Float64(1) [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
\n RangeSelect: range_exprs=[avg(a) RANGE 5m FILL NULL, sum(b) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[c, d], time_index=timestamp [avg(a) RANGE 5m FILL NULL:Float64;N, sum(b) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(ms), c:Utf8, d:Utf8]\
\n Projection: test.field_0 AS a, test.field_1 AS b, test.tag_0 AS c, test.tag_1 AS d, test.timestamp [a:Float64;N, b:Float64;N, c:Utf8, d:Utf8, timestamp:Timestamp(ms)]\
\n Filter: test.field_0 > Float64(1) [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
);
query_plan_compare(query, expected).await;
}
@@ -773,8 +770,8 @@ mod test {
let query = r#"SELECT sin(avg(field_0 + field_1) RANGE '5m' + 1) FROM test ALIGN '1h' by (tag_0,tag_1);"#;
let expected = String::from(
"Projection: sin(avg(test.field_0 + test.field_1) RANGE 5m + Int64(1)) [sin(avg(test.field_0 + test.field_1) RANGE 5m + Int64(1)):Float64;N]\
\n RangeSelect: range_exprs=[avg(test.field_0 + test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0 + test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
\n RangeSelect: range_exprs=[avg(test.field_0 + test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0 + test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(ms), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
);
query_plan_compare(query, expected).await;
}
@@ -784,8 +781,8 @@ mod test {
let query = r#"SELECT avg(field_0) RANGE '5m' FILL 6.0 + avg(field_0) RANGE '5m' FILL 6.0 FROM test ALIGN '1h' by (tag_0,tag_1);"#;
let expected = String::from(
"Projection: avg(test.field_0) RANGE 5m FILL 6 + avg(test.field_0) RANGE 5m FILL 6 [avg(test.field_0) RANGE 5m FILL 6 + avg(test.field_0) RANGE 5m FILL 6:Float64]\
\n RangeSelect: range_exprs=[avg(test.field_0) RANGE 5m FILL 6], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0) RANGE 5m FILL 6:Float64, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
\n RangeSelect: range_exprs=[avg(test.field_0) RANGE 5m FILL 6], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0) RANGE 5m FILL 6:Float64, timestamp:Timestamp(ms), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
);
query_plan_compare(query, expected).await;
}
@@ -795,8 +792,8 @@ mod test {
let query = r#"SELECT round(sin(avg(field_0 + field_1) RANGE '5m' + 1)) FROM test ALIGN '1h' by (tag_0,tag_1);"#;
let expected = String::from(
"Projection: round(sin(avg(test.field_0 + test.field_1) RANGE 5m + Int64(1))) [round(sin(avg(test.field_0 + test.field_1) RANGE 5m + Int64(1))):Float64;N]\
\n RangeSelect: range_exprs=[avg(test.field_0 + test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0 + test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
\n RangeSelect: range_exprs=[avg(test.field_0 + test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0 + test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(ms), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
);
query_plan_compare(query, expected).await;
}
@@ -806,8 +803,8 @@ mod test {
let query = r#"SELECT gcd(CAST(max(field_0 + 1) Range '5m' FILL NULL AS Int64), CAST(tag_0 AS Int64)) + round(max(field_2+1) Range '6m' FILL NULL + 1) + max(field_2+3) Range '10m' FILL NULL * CAST(tag_1 AS Float64) + 1 FROM test ALIGN '1h' by (tag_0, tag_1);"#;
let expected = String::from(
"Projection: gcd(arrow_cast(max(test.field_0 + Int64(1)) RANGE 5m FILL NULL, Utf8(\"Int64\")), arrow_cast(test.tag_0, Utf8(\"Int64\"))) + round(max(test.field_2 + Int64(1)) RANGE 6m FILL NULL + Int64(1)) + max(test.field_2 + Int64(3)) RANGE 10m FILL NULL * arrow_cast(test.tag_1, Utf8(\"Float64\")) + Int64(1) [gcd(arrow_cast(max(test.field_0 + Int64(1)) RANGE 5m FILL NULL,Utf8(\"Int64\")),arrow_cast(test.tag_0,Utf8(\"Int64\"))) + round(max(test.field_2 + Int64(1)) RANGE 6m FILL NULL + Int64(1)) + max(test.field_2 + Int64(3)) RANGE 10m FILL NULL * arrow_cast(test.tag_1,Utf8(\"Float64\")) + Int64(1):Float64;N]\
\n RangeSelect: range_exprs=[max(test.field_0 + Int64(1)) RANGE 5m FILL NULL, max(test.field_2 + Int64(1)) RANGE 6m FILL NULL, max(test.field_2 + Int64(3)) RANGE 10m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [max(test.field_0 + Int64(1)) RANGE 5m FILL NULL:Float64;N, max(test.field_2 + Int64(1)) RANGE 6m FILL NULL:Float64;N, max(test.field_2 + Int64(3)) RANGE 10m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
\n RangeSelect: range_exprs=[max(test.field_0 + Int64(1)) RANGE 5m FILL NULL, max(test.field_2 + Int64(1)) RANGE 6m FILL NULL, max(test.field_2 + Int64(3)) RANGE 10m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [max(test.field_0 + Int64(1)) RANGE 5m FILL NULL:Float64;N, max(test.field_2 + Int64(1)) RANGE 6m FILL NULL:Float64;N, max(test.field_2 + Int64(3)) RANGE 10m FILL NULL:Float64;N, timestamp:Timestamp(ms), tag_0:Utf8, tag_1:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
);
query_plan_compare(query, expected).await;
}
@@ -817,7 +814,7 @@ mod test {
let query = r#"SELECT min(CAST(field_0 AS Int64) + CAST(field_1 AS Int64)) RANGE '5m' FILL LINEAR FROM test ALIGN '1h' by (tag_0,tag_1);"#;
let expected = String::from(
"RangeSelect: range_exprs=[min(arrow_cast(test.field_0,Utf8(\"Int64\")) + arrow_cast(test.field_1,Utf8(\"Int64\"))) RANGE 5m FILL LINEAR], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [min(arrow_cast(test.field_0,Utf8(\"Int64\")) + arrow_cast(test.field_1,Utf8(\"Int64\"))) RANGE 5m FILL LINEAR:Float64;N]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(ms), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]",
);
query_plan_compare(query, expected).await;
}

@@ -49,19 +49,19 @@ pub fn new_ts_array(unit: TimeUnit, arr: Vec<i64>) -> ArrayRef {
pub struct MockInputExec {
input: Vec<Vec<DfRecordBatch>>,
schema: SchemaRef,
properties: PlanProperties,
properties: Arc<PlanProperties>,
metrics: ExecutionPlanMetricsSet,
}

impl MockInputExec {
pub fn new(input: Vec<Vec<DfRecordBatch>>, schema: SchemaRef) -> Self {
Self {
properties: PlanProperties::new(
properties: Arc::new(PlanProperties::new(
EquivalenceProperties::new(schema.clone()),
Partitioning::UnknownPartitioning(1),
EmissionType::Incremental,
Boundedness::Bounded,
),
)),
input,
schema,
metrics: ExecutionPlanMetricsSet::new(),
@@ -84,7 +84,7 @@ impl ExecutionPlan for MockInputExec {
self
}

fn properties(&self) -> &PlanProperties {
fn properties(&self) -> &Arc<PlanProperties> {
&self.properties
}

@@ -81,7 +81,7 @@ pub struct WindowedSortExec {
input: Arc<dyn ExecutionPlan>,
/// Execution metrics
metrics: ExecutionPlanMetricsSet,
properties: PlanProperties,
properties: Arc<PlanProperties>,
}

/// Checks that partition ranges are sorted correctly for the given sort direction.
@@ -130,12 +130,12 @@ impl WindowedSortExec {
eq_properties.reorder(vec![expression.clone()])?;

let properties = input.properties();
let properties = PlanProperties::new(
let properties = Arc::new(PlanProperties::new(
eq_properties,
input.output_partitioning().clone(),
properties.emission_type,
properties.boundedness,
);
));

let mut all_avail_working_range = Vec::with_capacity(ranges.len());
for r in &ranges {
@@ -202,7 +202,7 @@ impl ExecutionPlan for WindowedSortExec {
self.input.schema()
}

fn properties(&self) -> &PlanProperties {
fn properties(&self) -> &Arc<PlanProperties> {
&self.properties
}

@@ -25,7 +25,10 @@ use datatypes::arrow::datatypes::{DataType as ArrowDataType, IntervalUnit};
use datatypes::data_type::ConcreteDataType;
use itertools::Itertools;
use snafu::{OptionExt, ResultExt, ensure};
use sqlparser::ast::{ColumnOption, ColumnOptionDef, DataType, Expr};
use sqlparser::ast::{
ColumnOption, ColumnOptionDef, DataType, Expr, KeyOrIndexDisplay, NullsDistinctOption,
PrimaryKeyConstraint, UniqueConstraint,
};
use sqlparser::dialect::keywords::Keyword;
use sqlparser::keywords::ALL_KEYWORDS;
use sqlparser::parser::IsOptional::Mandatory;
@@ -717,15 +720,25 @@ impl<'a> ParserContext<'a> {
parser.parse_expr().context(SyntaxSnafu)?,
)))
} else if parser.parse_keywords(&[Keyword::PRIMARY, Keyword::KEY]) {
Ok(Some(ColumnOption::Unique {
is_primary: true,
Ok(Some(ColumnOption::PrimaryKey(PrimaryKeyConstraint {
name: None,
index_name: None,
index_type: None,
columns: vec![],
index_options: vec![],
characteristics: None,
}))
})))
} else if parser.parse_keyword(Keyword::UNIQUE) {
Ok(Some(ColumnOption::Unique {
is_primary: false,
Ok(Some(ColumnOption::Unique(UniqueConstraint {
name: None,
index_name: None,
index_type_display: KeyOrIndexDisplay::None,
index_type: None,
columns: vec![],
index_options: vec![],
characteristics: None,
}))
nulls_distinct: NullsDistinctOption::None,
})))
} else if parser.parse_keywords(&[Keyword::TIME, Keyword::INDEX]) {
// Use a DialectSpecific option for time index
Ok(Some(ColumnOption::DialectSpecific(vec![

@@ -23,8 +23,11 @@ use crate::statements::statement::Statement;
/// DELETE statement parser implementation
impl ParserContext<'_> {
pub(crate) fn parse_delete(&mut self) -> Result<Statement> {
let _ = self.parser.next_token();
let spstatement = self.parser.parse_delete().context(error::SyntaxSnafu)?;
let token = self.parser.next_token();
let spstatement = self
.parser
.parse_delete(token)
.context(error::SyntaxSnafu)?;

match spstatement {
SpStatement::Delete { .. } => {

@@ -69,6 +69,7 @@ mod tests {

let select = sqlparser::ast::Select {
distinct: None,
select_modifiers: None,
top: None,
projection: vec![sqlparser::ast::SelectItem::Wildcard(
WildcardAdditionalOptions::default(),
@@ -103,9 +104,10 @@ mod tests {
top_before_distinct: false,
prewhere: None,
window_before_qualify: false,
connect_by: None,
connect_by: vec![],
select_token: AttachedToken::empty(),
flavor: SelectFlavor::Standard,
optimizer_hint: None,
};

let sp_query = Box::new(

@@ -23,8 +23,11 @@ use crate::statements::statement::Statement;
/// INSERT/REPLACE statement parser implementation
impl ParserContext<'_> {
pub(crate) fn parse_insert(&mut self) -> Result<Statement> {
let _ = self.parser.next_token();
let spstatement = self.parser.parse_insert().context(error::SyntaxSnafu)?;
let token = self.parser.next_token();
let spstatement = self
.parser
.parse_insert(token)
.context(error::SyntaxSnafu)?;

match spstatement {
SpStatement::Insert { .. } => {
@@ -38,8 +41,11 @@ impl ParserContext<'_> {
}

pub(crate) fn parse_replace(&mut self) -> Result<Statement> {
let _ = self.parser.next_token();
let spstatement = self.parser.parse_insert().context(error::SyntaxSnafu)?;
let token = self.parser.next_token();
let spstatement = self
.parser
.parse_insert(token)
.context(error::SyntaxSnafu)?;

match spstatement {
SpStatement::Insert(mut insert_stmt) => {

@@ -15,7 +15,6 @@
use std::collections::HashMap;
use std::sync::Arc;

use chrono::Utc;
use datafusion::config::ConfigOptions;
use datafusion::error::Result as DfResult;
use datafusion::execution::SessionStateBuilder;
@@ -23,7 +22,6 @@ use datafusion::execution::context::SessionState;
use datafusion::optimizer::simplify_expressions::ExprSimplifier;
use datafusion_common::tree_node::{TreeNode, TreeNodeVisitor};
use datafusion_common::{DFSchema, ScalarValue};
use datafusion_expr::execution_props::ExecutionProps;
use datafusion_expr::simplify::SimplifyContext;
use datafusion_expr::{AggregateUDF, Expr, ScalarUDF, TableSource, WindowUDF};
use datafusion_sql::TableReference;
@@ -130,10 +128,7 @@ pub fn parser_expr_to_scalar_value_literal(
}

// 2. simplify logical expr
let execution_props = ExecutionProps::new().with_query_execution_start_time(Utc::now());
let info =
SimplifyContext::new(&execution_props).with_schema(Arc::new(empty_df_schema.clone()));

let info = SimplifyContext::default().with_current_time();
let simplifier = ExprSimplifier::new(info);

// Coerce the logical expression so simplifier can handle it correctly. This is necessary for const eval with possible type mismatch. i.e.: `now() - now() + '15s'::interval` which is `TimestampNanosecond - TimestampNanosecond + IntervalMonthDayNano`.
@@ -293,8 +288,6 @@ pub fn parse_with_options(parser: &mut Parser) -> Result<OptionMap> {

#[cfg(test)]
mod tests {
use std::sync::Arc;

use chrono::DateTime;
use datafusion::functions::datetime::expr_fn::now;
use datafusion_expr::lit;
@@ -343,9 +336,7 @@ mod tests {
),
];

let execution_props = ExecutionProps::new().with_query_execution_start_time(now_time);
let info = SimplifyContext::new(&execution_props).with_schema(Arc::new(DFSchema::empty()));

let info = SimplifyContext::default().with_query_execution_start_time(Some(now_time));
let simplifier = ExprSimplifier::new(info);
for (expr, expected) in testcases {
let expr_name = expr.schema_name().to_string();

@@ -114,6 +114,7 @@ impl ParserContext<'_> {
match cte.content {
CteContent::Sql(body) => sql_cte_tables.push(Cte {
alias: TableAlias {
explicit: false,
name: cte.name,
columns: cte
.columns

@@ -91,10 +91,7 @@ pub fn has_primary_key_option(column_def: &ColumnDef) -> bool {
column_def
.options
.iter()
.any(|options| match options.option {
ColumnOption::Unique { is_primary, .. } => is_primary,
_ => false,
})
.any(|options| matches!(options.option, ColumnOption::PrimaryKey(..)))
}

/// Create a `ColumnSchema` from `Column`.
@@ -198,15 +195,10 @@ pub fn sql_column_def_to_grpc_column_def(
.context(ConvertToGrpcDataTypeSnafu)?
.to_parts();

let is_primary_key = col.options.iter().any(|o| {
matches!(
o.option,
ColumnOption::Unique {
is_primary: true,
..
}
)
});
let is_primary_key = col
.options
.iter()
.any(|o| matches!(o.option, ColumnOption::PrimaryKey(..)));

let semantic_type = if is_primary_key {
SemanticType::Tag
@@ -375,7 +367,7 @@ mod tests {
use datatypes::schema::{
COLUMN_FULLTEXT_OPT_KEY_ANALYZER, COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, FulltextAnalyzer,
};
use sqlparser::ast::{ColumnOptionDef, Expr};
use sqlparser::ast::{ColumnOptionDef, Expr, PrimaryKeyConstraint};

use super::*;
use crate::ast::TimezoneInfo;
@@ -505,10 +497,14 @@ mod tests {
data_type: SqlDataType::Double(ExactNumberInfo::None),
options: vec![ColumnOptionDef {
name: None,
option: ColumnOption::Unique {
is_primary: true,
option: ColumnOption::PrimaryKey(PrimaryKeyConstraint {
name: None,
index_name: None,
index_type: None,
columns: vec![],
index_options: vec![],
characteristics: None,
},
}),
}],
};

@@ -583,10 +579,14 @@ mod tests {
data_type: SqlDataType::Double(ExactNumberInfo::None),
options: vec![ColumnOptionDef {
name: None,
option: ColumnOption::Unique {
is_primary: true,
option: ColumnOption::PrimaryKey(PrimaryKeyConstraint {
name: None,
index_name: None,
index_type: None,
columns: vec![],
index_options: vec![],
characteristics: None,
},
}),
}],
};
assert!(has_primary_key_option(&column_def));

@@ -111,6 +111,7 @@ impl TransformRule for ExpandIntervalTransformRule {
Expr::Cast {
expr: cast_exp,
data_type,
array,
kind,
format,
} => {
@@ -130,6 +131,7 @@ impl TransformRule for ExpandIntervalTransformRule {
kind: kind.clone(),
expr: single_quoted_string_expr(interval_value),
data_type: data_type.clone(),
array: *array,
format: std::mem::take(format),
}
}
@@ -396,6 +398,7 @@ mod tests {
fields: None,
precision: None,
},
array: false,
format: None,
kind: sqlparser::ast::CastKind::Cast,
};
@@ -414,6 +417,7 @@ mod tests {
fields: None,
precision: None,
},
array: false,
format: None,
}
);
@@ -421,6 +425,7 @@ mod tests {
let mut cast_to_i64_expr = Expr::Cast {
expr: single_quoted_string_expr("5".to_string()),
data_type: DataType::Int64,
array: false,
format: None,
kind: sqlparser::ast::CastKind::Cast,
};
@@ -431,6 +436,7 @@ mod tests {
Expr::Cast {
expr: single_quoted_string_expr("5".to_string()),
data_type: DataType::Int64,
array: false,
format: None,
kind: sqlparser::ast::CastKind::Cast,
}

@@ -59,7 +59,7 @@ pub struct RegionScanExec {
/// The expected output ordering for the plan.
output_ordering: Option<Vec<PhysicalSortExpr>>,
metric: ExecutionPlanMetricsSet,
properties: PlanProperties,
properties: Arc<PlanProperties>,
append_mode: bool,
total_rows: usize,
is_partition_set: bool,
@@ -206,12 +206,12 @@ impl RegionScanExec {
}
};

let properties = PlanProperties::new(
let properties = Arc::new(PlanProperties::new(
eq_props,
partitioning,
EmissionType::Incremental,
Boundedness::Bounded,
);
));
let append_mode = scanner_props.append_mode();
let total_rows = scanner_props.total_rows();
Ok(Self {
@@ -269,7 +269,7 @@ impl RegionScanExec {
warn!("Setting partition ranges more than once for RegionScanExec");
}

let mut properties = self.properties.clone();
let mut properties = self.properties.as_ref().clone();
let new_partitioning = match properties.partitioning {
Partitioning::Hash(ref columns, _) => {
Partitioning::Hash(columns.clone(), target_partitions)
@@ -292,7 +292,7 @@ impl RegionScanExec {
arrow_schema: self.arrow_schema.clone(),
output_ordering: self.output_ordering.clone(),
metric: self.metric.clone(),
properties,
properties: Arc::new(properties),
append_mode: self.append_mode,
total_rows: self.total_rows,
is_partition_set: true,
@@ -349,7 +349,7 @@ impl ExecutionPlan for RegionScanExec {
self.arrow_schema.clone()
}

fn properties(&self) -> &PlanProperties {
fn properties(&self) -> &Arc<PlanProperties> {
&self.properties
}