From 747b71bf74b4ba5ead90b8a0278db16d9d63993e Mon Sep 17 00:00:00 2001 From: Ruihang Xia Date: Mon, 14 Apr 2025 21:12:37 +0800 Subject: [PATCH] feat: add query engine options (#5895) * feat: add query engine options Signed-off-by: Ruihang Xia * update example Signed-off-by: Ruihang Xia --------- Signed-off-by: Ruihang Xia --- config/config.md | 6 ++++ config/datanode.example.toml | 6 ++++ config/frontend.example.toml | 6 ++++ config/standalone.example.toml | 6 ++++ src/datanode/src/config.rs | 3 ++ src/datanode/src/datanode.rs | 1 + src/flow/src/adapter.rs | 3 ++ src/flow/src/server.rs | 1 + src/flow/src/test_utils.rs | 11 ++++++- src/flow/src/transform.rs | 11 ++++++- src/frontend/src/frontend.rs | 3 ++ src/frontend/src/instance/builder.rs | 1 + src/query/src/datafusion.rs | 12 +++++++- src/query/src/lib.rs | 1 + src/query/src/options.rs | 30 +++++++++++++++++++ src/query/src/query_engine.rs | 16 +++++++++- src/query/src/query_engine/context.rs | 2 ++ .../src/query_engine/default_serializer.rs | 11 ++++++- src/query/src/query_engine/state.rs | 6 ++++ src/query/src/range_select/plan_rewrite.rs | 12 +++++++- src/query/src/tests.rs | 12 +++++++- src/query/src/tests/query_engine_test.rs | 23 ++++++++++++-- src/query/src/tests/time_range_filter_test.rs | 13 ++++++-- src/servers/tests/mod.rs | 13 ++++++-- 24 files changed, 195 insertions(+), 14 deletions(-) create mode 100644 src/query/src/options.rs diff --git a/config/config.md b/config/config.md index ba2540f2c6..d0d7582db5 100644 --- a/config/config.md +++ b/config/config.md @@ -96,6 +96,8 @@ | `procedure.max_running_procedures` | Integer | `128` | Max running procedures.
The maximum number of procedures that can be running at the same time.
If the number of running procedures exceeds this limit, the procedure will be rejected. | | `flow` | -- | -- | flow engine options. | | `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.
Not setting(or set to 0) this value will use the number of CPU cores divided by 2. | +| `query` | -- | -- | The query engine options. | +| `query.parallelism` | Integer | `0` | Parallelism of the query engine.
Defaults to 0, which means the number of CPU cores. | | `storage` | -- | -- | The data storage options. | | `storage.data_home` | String | `./greptimedb_data/` | The working home directory. | | `storage.type` | String | `File` | The storage type used to store the data.<br/>
- `File`: the data is stored in the local file system.
- `S3`: the data is stored in the S3 object storage.
- `Gcs`: the data is stored in the Google Cloud Storage.
- `Azblob`: the data is stored in the Azure Blob Storage.
- `Oss`: the data is stored in the Aliyun OSS. | @@ -270,6 +272,8 @@ | `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. | | `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. | | `meta_client.metadata_cache_tti` | String | `5m` | -- | +| `query` | -- | -- | The query engine options. | +| `query.parallelism` | Integer | `0` | Parallelism of the query engine.
Defaults to 0, which means the number of CPU cores. | | `datanode` | -- | -- | Datanode options. | | `datanode.client` | -- | -- | Datanode client options. | | `datanode.client.connect_timeout` | String | `10s` | -- | @@ -429,6 +433,8 @@ | `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>
**It's only used when the provider is `kafka`**. | | `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.
**It's only used when the provider is `kafka`**. | | `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.
**It's only used when the provider is `kafka`**.

This option ensures that when Kafka messages are deleted, the system
can still successfully replay memtable data without throwing an
out-of-range error.
However, enabling this option might lead to unexpected data loss,
as the system will skip over missing entries instead of treating
them as critical errors. | +| `query` | -- | -- | The query engine options. | +| `query.parallelism` | Integer | `0` | Parallelism of the query engine.
Defaults to 0, which means the number of CPU cores. | | `storage` | -- | -- | The data storage options. | | `storage.data_home` | String | `./greptimedb_data/` | The working home directory. | | `storage.type` | String | `File` | The storage type used to store the data.<br/>
- `File`: the data is stored in the local file system.
- `S3`: the data is stored in the S3 object storage.
- `Gcs`: the data is stored in the Google Cloud Storage.
- `Azblob`: the data is stored in the Azure Blob Storage.
- `Oss`: the data is stored in the Aliyun OSS. | diff --git a/config/datanode.example.toml b/config/datanode.example.toml index af6b5571d2..46beb51a23 100644 --- a/config/datanode.example.toml +++ b/config/datanode.example.toml @@ -243,6 +243,12 @@ overwrite_entry_start_id = false # credential = "base64-credential" # endpoint = "https://storage.googleapis.com" +## The query engine options. +[query] +## Parallelism of the query engine. +## Default to 0, which means the number of CPU cores. +parallelism = 0 + ## The data storage options. [storage] ## The working home directory. diff --git a/config/frontend.example.toml b/config/frontend.example.toml index 3d4cd78144..2e3ee4a69d 100644 --- a/config/frontend.example.toml +++ b/config/frontend.example.toml @@ -179,6 +179,12 @@ metadata_cache_ttl = "10m" # TTI of the metadata cache. metadata_cache_tti = "5m" +## The query engine options. +[query] +## Parallelism of the query engine. +## Default to 0, which means the number of CPU cores. +parallelism = 0 + ## Datanode options. [datanode] ## Datanode client options. diff --git a/config/standalone.example.toml b/config/standalone.example.toml index bdef754712..0e72cfcc7e 100644 --- a/config/standalone.example.toml +++ b/config/standalone.example.toml @@ -334,6 +334,12 @@ max_running_procedures = 128 # credential = "base64-credential" # endpoint = "https://storage.googleapis.com" +## The query engine options. +[query] +## Parallelism of the query engine. +## Default to 0, which means the number of CPU cores. +parallelism = 0 + ## The data storage options. [storage] ## The working home directory. 
diff --git a/src/datanode/src/config.rs b/src/datanode/src/config.rs index 322f337ba3..7d63057a72 100644 --- a/src/datanode/src/config.rs +++ b/src/datanode/src/config.rs @@ -26,6 +26,7 @@ use file_engine::config::EngineConfig as FileEngineConfig; use meta_client::MetaClientOptions; use metric_engine::config::EngineConfig as MetricEngineConfig; use mito2::config::MitoConfig; +use query::options::QueryOptions; use serde::{Deserialize, Serialize}; use servers::export_metrics::ExportMetricsOption; use servers::grpc::GrpcOptions; @@ -375,6 +376,7 @@ pub struct DatanodeOptions { pub enable_telemetry: bool, pub export_metrics: ExportMetricsOption, pub tracing: TracingOptions, + pub query: QueryOptions, /// Deprecated options, please use the new options instead. #[deprecated(note = "Please use `grpc.addr` instead.")] @@ -412,6 +414,7 @@ impl Default for DatanodeOptions { enable_telemetry: true, export_metrics: ExportMetricsOption::default(), tracing: TracingOptions::default(), + query: QueryOptions::default(), // Deprecated options rpc_addr: None, diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs index b32a1668c6..4b1e720032 100644 --- a/src/datanode/src/datanode.rs +++ b/src/datanode/src/datanode.rs @@ -359,6 +359,7 @@ impl DatanodeBuilder { None, false, self.plugins.clone(), + opts.query.clone(), ); let query_engine = query_engine_factory.query_engine(); diff --git a/src/flow/src/adapter.rs b/src/flow/src/adapter.rs index 1dd3e7e40e..8fd62ee2a0 100644 --- a/src/flow/src/adapter.rs +++ b/src/flow/src/adapter.rs @@ -32,6 +32,7 @@ use datatypes::value::Value; use greptime_proto::v1; use itertools::{EitherOrBoth, Itertools}; use meta_client::MetaClientOptions; +use query::options::QueryOptions; use query::QueryEngine; use serde::{Deserialize, Serialize}; use servers::grpc::GrpcOptions; @@ -109,6 +110,7 @@ pub struct FlownodeOptions { pub logging: LoggingOptions, pub tracing: TracingOptions, pub heartbeat: HeartbeatOptions, + pub query: QueryOptions, } 
impl Default for FlownodeOptions { @@ -122,6 +124,7 @@ impl Default for FlownodeOptions { logging: LoggingOptions::default(), tracing: TracingOptions::default(), heartbeat: HeartbeatOptions::default(), + query: QueryOptions::default(), } } } diff --git a/src/flow/src/server.rs b/src/flow/src/server.rs index f347ac369e..d0038e6ba1 100644 --- a/src/flow/src/server.rs +++ b/src/flow/src/server.rs @@ -332,6 +332,7 @@ impl FlownodeBuilder { None, false, Default::default(), + self.opts.query.clone(), ); let manager = Arc::new( self.build_manager(query_engine_factory.query_engine()) diff --git a/src/flow/src/test_utils.rs b/src/flow/src/test_utils.rs index 4d269a80c0..ecaabae32d 100644 --- a/src/flow/src/test_utils.rs +++ b/src/flow/src/test_utils.rs @@ -23,6 +23,7 @@ use datatypes::timestamp::TimestampMillisecond; use datatypes::vectors::{TimestampMillisecondVectorBuilder, VectorRef}; use itertools::Itertools; use prost::Message; +use query::options::QueryOptions; use query::parser::QueryLanguageParser; use query::query_engine::DefaultSerializer; use query::QueryEngine; @@ -146,7 +147,15 @@ pub fn create_test_query_engine() -> Arc { }; catalog_list.register_table_sync(req_with_ts).unwrap(); - let factory = query::QueryEngineFactory::new(catalog_list, None, None, None, None, false); + let factory = query::QueryEngineFactory::new( + catalog_list, + None, + None, + None, + None, + false, + QueryOptions::default(), + ); let engine = factory.query_engine(); register_function_to_query_engine(&engine); diff --git a/src/flow/src/transform.rs b/src/flow/src/transform.rs index 15da89b21f..04c7f40e68 100644 --- a/src/flow/src/transform.rs +++ b/src/flow/src/transform.rs @@ -171,6 +171,7 @@ mod test { use datatypes::vectors::{TimestampMillisecondVectorBuilder, VectorRef}; use itertools::Itertools; use prost::Message; + use query::options::QueryOptions; use query::parser::QueryLanguageParser; use query::query_engine::DefaultSerializer; use query::QueryEngine; @@ -263,7 +264,15 @@ mod 
test { }; catalog_list.register_table_sync(req_with_ts).unwrap(); - let factory = query::QueryEngineFactory::new(catalog_list, None, None, None, None, false); + let factory = query::QueryEngineFactory::new( + catalog_list, + None, + None, + None, + None, + false, + QueryOptions::default(), + ); let engine = factory.query_engine(); register_function_to_query_engine(&engine); diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs index 983550d0e7..ba795730c4 100644 --- a/src/frontend/src/frontend.rs +++ b/src/frontend/src/frontend.rs @@ -19,6 +19,7 @@ use common_config::config::Configurable; use common_options::datanode::DatanodeClientOptions; use common_telemetry::logging::{LoggingOptions, TracingOptions}; use meta_client::MetaClientOptions; +use query::options::QueryOptions; use serde::{Deserialize, Serialize}; use servers::export_metrics::{ExportMetricsOption, ExportMetricsTask}; use servers::grpc::GrpcOptions; @@ -58,6 +59,7 @@ pub struct FrontendOptions { pub user_provider: Option, pub export_metrics: ExportMetricsOption, pub tracing: TracingOptions, + pub query: QueryOptions, pub max_in_flight_write_bytes: Option, } @@ -82,6 +84,7 @@ impl Default for FrontendOptions { user_provider: None, export_metrics: ExportMetricsOption::default(), tracing: TracingOptions::default(), + query: QueryOptions::default(), max_in_flight_write_bytes: None, } } diff --git a/src/frontend/src/instance/builder.rs b/src/frontend/src/instance/builder.rs index 8503999b2c..ffbfeabca1 100644 --- a/src/frontend/src/instance/builder.rs +++ b/src/frontend/src/instance/builder.rs @@ -166,6 +166,7 @@ impl FrontendBuilder { Some(Arc::new(flow_service)), true, plugins.clone(), + self.options.query.clone(), ) .query_engine(); diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs index dba7d0215a..db4207fd8a 100644 --- a/src/query/src/datafusion.rs +++ b/src/query/src/datafusion.rs @@ -567,6 +567,7 @@ mod tests { use table::table::numbers::{NumbersTable, 
NUMBERS_TABLE_NAME}; use super::*; + use crate::options::QueryOptions; use crate::parser::QueryLanguageParser; use crate::query_engine::{QueryEngineFactory, QueryEngineRef}; @@ -581,7 +582,16 @@ mod tests { }; catalog_manager.register_table_sync(req).unwrap(); - QueryEngineFactory::new(catalog_manager, None, None, None, None, false).query_engine() + QueryEngineFactory::new( + catalog_manager, + None, + None, + None, + None, + false, + QueryOptions::default(), + ) + .query_engine() } #[tokio::test] diff --git a/src/query/src/lib.rs b/src/query/src/lib.rs index 6e1fbfae0a..26fbfb27cd 100644 --- a/src/query/src/lib.rs +++ b/src/query/src/lib.rs @@ -29,6 +29,7 @@ pub mod executor; pub mod log_query; pub mod metrics; mod optimizer; +pub mod options; pub mod parser; mod part_sort; pub mod physical_wrapper; diff --git a/src/query/src/options.rs b/src/query/src/options.rs new file mode 100644 index 0000000000..441e9f161f --- /dev/null +++ b/src/query/src/options.rs @@ -0,0 +1,30 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use serde::{Deserialize, Serialize}; + +/// Query engine config +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(default)] +pub struct QueryOptions { + /// Parallelism of query engine. Default to 0, which implies the number of logical CPUs. 
+ pub parallelism: usize, +} + +#[allow(clippy::derivable_impls)] +impl Default for QueryOptions { + fn default() -> Self { + Self { parallelism: 0 } + } +} diff --git a/src/query/src/query_engine.rs b/src/query/src/query_engine.rs index c4e8aee7d1..8b0c091054 100644 --- a/src/query/src/query_engine.rs +++ b/src/query/src/query_engine.rs @@ -38,6 +38,7 @@ use table::TableRef; use crate::dataframe::DataFrame; use crate::datafusion::DatafusionQueryEngine; use crate::error::Result; +use crate::options::QueryOptions; use crate::planner::LogicalPlanner; pub use crate::query_engine::context::QueryEngineContext; pub use crate::query_engine::state::QueryEngineState; @@ -106,6 +107,7 @@ impl QueryEngineFactory { procedure_service_handler: Option, flow_service_handler: Option, with_dist_planner: bool, + options: QueryOptions, ) -> Self { Self::new_with_plugins( catalog_manager, @@ -115,9 +117,11 @@ impl QueryEngineFactory { flow_service_handler, with_dist_planner, Default::default(), + options, ) } + #[allow(clippy::too_many_arguments)] pub fn new_with_plugins( catalog_manager: CatalogManagerRef, region_query_handler: Option, @@ -126,6 +130,7 @@ impl QueryEngineFactory { flow_service_handler: Option, with_dist_planner: bool, plugins: Plugins, + options: QueryOptions, ) -> Self { let state = Arc::new(QueryEngineState::new( catalog_manager, @@ -135,6 +140,7 @@ impl QueryEngineFactory { flow_service_handler, with_dist_planner, plugins.clone(), + options, )); let query_engine = Arc::new(DatafusionQueryEngine::new(state, plugins)); register_functions(&query_engine); @@ -166,7 +172,15 @@ mod tests { #[test] fn test_query_engine_factory() { let catalog_list = catalog::memory::new_memory_catalog_manager().unwrap(); - let factory = QueryEngineFactory::new(catalog_list, None, None, None, None, false); + let factory = QueryEngineFactory::new( + catalog_list, + None, + None, + None, + None, + false, + QueryOptions::default(), + ); let engine = factory.query_engine(); diff --git 
a/src/query/src/query_engine/context.rs b/src/query/src/query_engine/context.rs index d8c110d2f2..df20a70a42 100644 --- a/src/query/src/query_engine/context.rs +++ b/src/query/src/query_engine/context.rs @@ -75,6 +75,7 @@ impl QueryEngineContext { use common_base::Plugins; use session::context::QueryContext; + use crate::options::QueryOptions; use crate::query_engine::QueryEngineState; let state = Arc::new(QueryEngineState::new( @@ -85,6 +86,7 @@ impl QueryEngineContext { None, false, Plugins::default(), + QueryOptions::default(), )); QueryEngineContext::new(state.session_state(), QueryContext::arc()) diff --git a/src/query/src/query_engine/default_serializer.rs b/src/query/src/query_engine/default_serializer.rs index 23d6789866..c3feed1d55 100644 --- a/src/query/src/query_engine/default_serializer.rs +++ b/src/query/src/query_engine/default_serializer.rs @@ -159,6 +159,7 @@ mod tests { use super::*; use crate::dummy_catalog::DummyCatalogList; use crate::optimizer::test_util::mock_table_provider; + use crate::options::QueryOptions; use crate::QueryEngineFactory; fn mock_plan(schema: SchemaRef) -> LogicalPlan { @@ -177,7 +178,15 @@ mod tests { #[tokio::test] async fn test_serializer_decode_plan() { let catalog_list = catalog::memory::new_memory_catalog_manager().unwrap(); - let factory = QueryEngineFactory::new(catalog_list, None, None, None, None, false); + let factory = QueryEngineFactory::new( + catalog_list, + None, + None, + None, + None, + false, + QueryOptions::default(), + ); let engine = factory.query_engine(); diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs index 812fc2c2af..75e1ed84a7 100644 --- a/src/query/src/query_engine/state.rs +++ b/src/query/src/query_engine/state.rs @@ -54,6 +54,7 @@ use crate::optimizer::string_normalization::StringNormalizationRule; use crate::optimizer::type_conversion::TypeConversionRule; use crate::optimizer::windowed_sort::WindowedSortPhysicalRule; use 
crate::optimizer::ExtensionAnalyzerRule; +use crate::options::QueryOptions as QueryOptionsNew; use crate::query_engine::options::QueryOptions; use crate::query_engine::DefaultSerializer; use crate::range_select::planner::RangeSelectPlanner; @@ -81,6 +82,7 @@ impl fmt::Debug for QueryEngineState { } impl QueryEngineState { + #[allow(clippy::too_many_arguments)] pub fn new( catalog_list: CatalogManagerRef, region_query_handler: Option, @@ -89,9 +91,13 @@ impl QueryEngineState { flow_service_handler: Option, with_dist_planner: bool, plugins: Plugins, + options: QueryOptionsNew, ) -> Self { let runtime_env = Arc::new(RuntimeEnv::default()); let mut session_config = SessionConfig::new().with_create_default_catalog_and_schema(false); + if options.parallelism > 0 { + session_config = session_config.with_target_partitions(options.parallelism); + } // todo(hl): This serves as a workaround for https://github.com/GreptimeTeam/greptimedb/issues/5659 // and we can add that check back once we upgrade datafusion. 
diff --git a/src/query/src/range_select/plan_rewrite.rs b/src/query/src/range_select/plan_rewrite.rs index b53e1079b8..5e0f223663 100644 --- a/src/query/src/range_select/plan_rewrite.rs +++ b/src/query/src/range_select/plan_rewrite.rs @@ -611,6 +611,7 @@ mod test { use table::test_util::EmptyTable; use super::*; + use crate::options::QueryOptions; use crate::parser::QueryLanguageParser; use crate::{QueryEngineFactory, QueryEngineRef}; @@ -663,7 +664,16 @@ mod test { table, }) .is_ok()); - QueryEngineFactory::new(catalog_list, None, None, None, None, false).query_engine() + QueryEngineFactory::new( + catalog_list, + None, + None, + None, + None, + false, + QueryOptions::default(), + ) + .query_engine() } async fn do_query(sql: &str) -> Result { diff --git a/src/query/src/tests.rs b/src/query/src/tests.rs index f2f2e40bf3..7c004e5229 100644 --- a/src/query/src/tests.rs +++ b/src/query/src/tests.rs @@ -18,6 +18,7 @@ use common_recordbatch::{util, RecordBatch}; use session::context::QueryContext; use table::TableRef; +use crate::options::QueryOptions; use crate::parser::QueryLanguageParser; use crate::{QueryEngineFactory, QueryEngineRef}; @@ -46,5 +47,14 @@ async fn exec_selection(engine: QueryEngineRef, sql: &str) -> Vec { pub fn new_query_engine_with_table(table: TableRef) -> QueryEngineRef { let catalog_manager = MemoryCatalogManager::new_with_table(table); - QueryEngineFactory::new(catalog_manager, None, None, None, None, false).query_engine() + QueryEngineFactory::new( + catalog_manager, + None, + None, + None, + None, + false, + QueryOptions::default(), + ) + .query_engine() } diff --git a/src/query/src/tests/query_engine_test.rs b/src/query/src/tests/query_engine_test.rs index 0f3f817703..07bac1363a 100644 --- a/src/query/src/tests/query_engine_test.rs +++ b/src/query/src/tests/query_engine_test.rs @@ -33,6 +33,7 @@ use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME}; use table::test_util::MemTable; use crate::error::{QueryExecutionSnafu, Result}; +use 
crate::options::QueryOptions as QueryOptionsNew; use crate::parser::QueryLanguageParser; use crate::query_engine::options::QueryOptions; use crate::query_engine::QueryEngineFactory; @@ -43,7 +44,15 @@ async fn test_datafusion_query_engine() -> Result<()> { let catalog_list = catalog::memory::new_memory_catalog_manager() .map_err(BoxedError::new) .context(QueryExecutionSnafu)?; - let factory = QueryEngineFactory::new(catalog_list, None, None, None, None, false); + let factory = QueryEngineFactory::new( + catalog_list, + None, + None, + None, + None, + false, + QueryOptionsNew::default(), + ); let engine = factory.query_engine(); let column_schemas = vec![ColumnSchema::new( @@ -122,8 +131,16 @@ async fn test_query_validate() -> Result<()> { disallow_cross_catalog_query: true, }); - let factory = - QueryEngineFactory::new_with_plugins(catalog_list, None, None, None, None, false, plugins); + let factory = QueryEngineFactory::new_with_plugins( + catalog_list, + None, + None, + None, + None, + false, + plugins, + QueryOptionsNew::default(), + ); let engine = factory.query_engine(); let stmt = diff --git a/src/query/src/tests/time_range_filter_test.rs b/src/query/src/tests/time_range_filter_test.rs index e141c99fa5..84bdd8cb18 100644 --- a/src/query/src/tests/time_range_filter_test.rs +++ b/src/query/src/tests/time_range_filter_test.rs @@ -33,6 +33,7 @@ use table::predicate::build_time_range_predicate; use table::test_util::MemTable; use table::{Table, TableRef}; +use crate::options::QueryOptions; use crate::tests::exec_selection; use crate::{QueryEngineFactory, QueryEngineRef}; @@ -102,8 +103,16 @@ fn create_test_engine() -> TimeRangeTester { }; let _ = catalog_manager.register_table_sync(req).unwrap(); - let engine = - QueryEngineFactory::new(catalog_manager, None, None, None, None, false).query_engine(); + let engine = QueryEngineFactory::new( + catalog_manager, + None, + None, + None, + None, + false, + QueryOptions::default(), + ) + .query_engine(); TimeRangeTester { 
engine, filter } } diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs index aa07980240..13c78a293f 100644 --- a/src/servers/tests/mod.rs +++ b/src/servers/tests/mod.rs @@ -21,6 +21,7 @@ use catalog::memory::MemoryCatalogManager; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_query::Output; use datafusion_expr::LogicalPlan; +use query::options::QueryOptions; use query::parser::{PromQuery, QueryLanguageParser, QueryStatement}; use query::query_engine::DescribeResult; use query::{QueryEngineFactory, QueryEngineRef}; @@ -158,8 +159,16 @@ impl GrpcQueryHandler for DummyInstance { fn create_testing_instance(table: TableRef) -> DummyInstance { let catalog_manager = MemoryCatalogManager::new_with_table(table); - let query_engine = - QueryEngineFactory::new(catalog_manager, None, None, None, None, false).query_engine(); + let query_engine = QueryEngineFactory::new( + catalog_manager, + None, + None, + None, + None, + false, + QueryOptions::default(), + ) + .query_engine(); DummyInstance::new(query_engine) }