Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-06 05:12:54 +00:00)

Compare commits: zhongzc/re...feat/bulk- (10 commits)
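At a glance, the head branch bumps the greptime-proto pin, adds a configurable Arrow Flight compression mode (`grpc.flight_compression`) that is threaded from the frontend and datanode configs down to the gRPC channels and flight clients, introduces an `adjust_flow` admin function for tuning batching-mode flows, adds a `handle_mark_window_dirty` path so flow source-table time windows can be marked dirty in bulk, and relabels the flow row metrics into in/out streaming/batching series.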
| SHA1 |
|---|
| ffe84af343 |
| 41c40688c4 |
| 59a82a3f29 |
| 0d7012a1ea |
| 88da98c829 |
| 73ce5914f2 |
| f5eac3528c |
| 9cd61d221d |
| 7527ff976e |
| 1d53dd26ae |
Cargo.lock (generated) — 3 changes
@@ -2336,7 +2336,6 @@ dependencies = [
  "num-traits",
  "once_cell",
  "paste",
- "promql",
  "s2",
  "serde",
  "serde_json",

@@ -5134,7 +5133,7 @@ dependencies = [
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=454c52634c3bac27de10bf0d85d5533eed1cf03f#454c52634c3bac27de10bf0d85d5533eed1cf03f"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=52083925a15d741c259800a9a54eba3467939180#52083925a15d741c259800a9a54eba3467939180"
 dependencies = [
  "prost 0.13.5",
  "serde",

@@ -133,7 +133,7 @@ etcd-client = "0.14"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "454c52634c3bac27de10bf0d85d5533eed1cf03f" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "52083925a15d741c259800a9a54eba3467939180" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"

@@ -232,6 +232,7 @@
 | `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
 | `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
 | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
+| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression. |
 | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
 | `grpc.tls.mode` | String | `disable` | TLS mode. |
 | `grpc.tls.cert_path` | String | Unset | Certificate file path. |

@@ -404,6 +405,7 @@
 | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
 | `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
 | `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
+| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for datanode side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression. |
 | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
 | `grpc.tls.mode` | String | `disable` | TLS mode. |
 | `grpc.tls.cert_path` | String | Unset | Certificate file path. |

@@ -44,6 +44,13 @@ runtime_size = 8
 max_recv_message_size = "512MB"
 ## The maximum send message size for gRPC server.
 max_send_message_size = "512MB"
+## Compression mode for datanode side Arrow IPC service. Available options:
+## - `none`: disable all compression
+## - `transport`: only enable gRPC transport compression (zstd)
+## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
+## - `all`: enable all compression.
+## Default to `none`
+flight_compression = "arrow_ipc"

 ## gRPC server TLS options, see `mysql.tls` section.
 [grpc.tls]

@@ -54,6 +54,13 @@ bind_addr = "127.0.0.1:4001"
 server_addr = "127.0.0.1:4001"
 ## The number of server worker threads.
 runtime_size = 8
+## Compression mode for frontend side Arrow IPC service. Available options:
+## - `none`: disable all compression
+## - `transport`: only enable gRPC transport compression (zstd)
+## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
+## - `all`: enable all compression.
+## Default to `none`
+flight_compression = "arrow_ipc"

 ## gRPC server TLS options, see `mysql.tls` section.
 [grpc.tls]

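The four options toggle two independent layers: zstd on the gRPC transport and lz4 inside the Arrow IPC payload. Below is a minimal Rust sketch of how the modes could map onto predicates such as the `transport_compression()` used later in the frontend start path. The real `FlightCompression` type lives in the `servers` crate; its exact shape, including the `arrow_compression` name below, is an assumption rather than part of this diff:

    // Sketch only: variant names mirror the documented option strings.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub enum FlightCompression {
        None,
        Transport,
        ArrowIpc,
        All,
    }

    impl FlightCompression {
        /// zstd on the gRPC transport is active for `transport` and `all`.
        pub fn transport_compression(self) -> bool {
            matches!(self, FlightCompression::Transport | FlightCompression::All)
        }

        /// lz4 inside the Arrow IPC stream is active for `arrow_ipc` and `all` (assumed name).
        pub fn arrow_compression(self) -> bool {
            matches!(self, FlightCompression::ArrowIpc | FlightCompression::All)
        }
    }
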
@@ -162,12 +162,23 @@ impl Client {
             .as_bytes() as usize
     }

-    pub fn make_flight_client(&self) -> Result<FlightClient> {
+    pub fn make_flight_client(
+        &self,
+        send_compression: bool,
+        accept_compression: bool,
+    ) -> Result<FlightClient> {
         let (addr, channel) = self.find_channel()?;

-        let client = FlightServiceClient::new(channel)
+        let mut client = FlightServiceClient::new(channel)
             .max_decoding_message_size(self.max_grpc_recv_message_size())
             .max_encoding_message_size(self.max_grpc_send_message_size());
+        // todo(hl): support compression methods.
+        if send_compression {
+            client = client.send_compressed(CompressionEncoding::Zstd);
+        }
+        if accept_compression {
+            client = client.accept_compressed(CompressionEncoding::Zstd);
+        }

         Ok(FlightClient { addr, client })
     }

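A hedged usage sketch of the new call surface: both flags are explicit at every call site, so a caller can opt into zstd per connection while the plain `Database` paths in the hunks below keep passing `false, false`. The `Client::with_urls` constructor is assumed from the surrounding client crate:

    // Sketch; the constructor and error handling are assumptions.
    let client = Client::with_urls(vec!["127.0.0.1:4001"]);
    let mut compressed = client.make_flight_client(true, true)?; // zstd both ways
    let mut plain = client.make_flight_client(false, false)?;    // no compression
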
@@ -49,7 +49,16 @@ impl NodeManager for NodeClients {
    async fn datanode(&self, datanode: &Peer) -> DatanodeRef {
        let client = self.get_client(datanode).await;

-        Arc::new(RegionRequester::new(client))
+        let ChannelConfig {
+            send_compression,
+            accept_compression,
+            ..
+        } = self.channel_manager.config();
+        Arc::new(RegionRequester::new(
+            client,
+            *send_compression,
+            *accept_compression,
+        ))
    }

    async fn flownode(&self, flownode: &Peer) -> FlownodeRef {

@@ -287,7 +287,7 @@ impl Database {
        let mut request = tonic::Request::new(request);
        Self::put_hints(request.metadata_mut(), hints)?;

-        let mut client = self.client.make_flight_client()?;
+        let mut client = self.client.make_flight_client(false, false)?;

        let response = client.mut_inner().do_get(request).await.or_else(|e| {
            let tonic_code = e.code();

@@ -409,7 +409,7 @@ impl Database {
            MetadataValue::from_str(db_to_put).context(InvalidTonicMetadataValueSnafu)?,
        );

-        let mut client = self.client.make_flight_client()?;
+        let mut client = self.client.make_flight_client(false, false)?;
        let response = client.mut_inner().do_put(request).await?;
        let response = response
            .into_inner()

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use api::v1::flow::{FlowRequest, FlowResponse};
+use api::v1::flow::{DirtyWindowRequest, DirtyWindowRequests, FlowRequest, FlowResponse};
 use api::v1::region::InsertRequests;
 use common_error::ext::BoxedError;
 use common_meta::node_manager::Flownode;

@@ -44,6 +44,16 @@ impl Flownode for FlowRequester {
            .map_err(BoxedError::new)
            .context(common_meta::error::ExternalSnafu)
    }
+
+    async fn handle_mark_window_dirty(
+        &self,
+        req: DirtyWindowRequest,
+    ) -> common_meta::error::Result<FlowResponse> {
+        self.handle_mark_window_dirty(req)
+            .await
+            .map_err(BoxedError::new)
+            .context(common_meta::error::ExternalSnafu)
+    }
 }

 impl FlowRequester {

@@ -91,4 +101,20 @@ impl FlowRequester {
            .into_inner();
        Ok(response)
    }
+
+    async fn handle_mark_window_dirty(&self, req: DirtyWindowRequest) -> Result<FlowResponse> {
+        let (addr, mut client) = self.client.raw_flow_client()?;
+        let response = client
+            .handle_mark_dirty_time_window(DirtyWindowRequests {
+                requests: vec![req],
+            })
+            .await
+            .or_else(|e| {
+                let code = e.code();
+                let err: crate::error::Error = e.into();
+                Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
+            })?
+            .into_inner();
+        Ok(response)
+    }
 }

@@ -46,6 +46,8 @@ use crate::{metrics, Client, Error};
 #[derive(Debug)]
 pub struct RegionRequester {
     client: Client,
+    send_compression: bool,
+    accept_compression: bool,
 }

 #[async_trait]

@@ -89,12 +91,18 @@ impl Datanode for RegionRequester {
    }

 impl RegionRequester {
-    pub fn new(client: Client) -> Self {
-        Self { client }
+    pub fn new(client: Client, send_compression: bool, accept_compression: bool) -> Self {
+        Self {
+            client,
+            send_compression,
+            accept_compression,
+        }
    }

    pub async fn do_get_inner(&self, ticket: Ticket) -> Result<SendableRecordBatchStream> {
-        let mut flight_client = self.client.make_flight_client()?;
+        let mut flight_client = self
+            .client
+            .make_flight_client(self.send_compression, self.accept_compression)?;
        let response = flight_client
            .mut_inner()
            .do_get(ticket)

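End to end on the datanode read path, the flags are fixed once at construction and replayed on every Flight call; a sketch assuming a `client` built as in the earlier example:

    // Flags captured here are reused by do_get_inner on each request, i.e.
    // self.client.make_flight_client(self.send_compression, self.accept_compression)
    let requester = RegionRequester::new(client, true, true);
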
@@ -364,12 +364,16 @@ impl StartCommand {

        // frontend to datanode need not timeout.
        // Some queries are expected to take long time.
-        let channel_config = ChannelConfig {
+        let mut channel_config = ChannelConfig {
            timeout: None,
            tcp_nodelay: opts.datanode.client.tcp_nodelay,
            connect_timeout: Some(opts.datanode.client.connect_timeout),
            ..Default::default()
        };
+        if opts.grpc.flight_compression.transport_compression() {
+            channel_config.accept_compression = true;
+            channel_config.send_compression = true;
+        }
        let client = NodeClients::new(channel_config);

        let instance = FrontendBuilder::new(

@@ -47,7 +47,6 @@ num = "0.4"
 num-traits = "0.2"
 once_cell.workspace = true
 paste.workspace = true
-promql.workspace = true
 s2 = { version = "0.0.12", optional = true }
 serde.workspace = true
 serde_json.workspace = true

src/common/function/src/adjust_flow.rs (new file) — 90 additions
@@ -0,0 +1,90 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_macro::admin_fn;
+use common_query::error::{
+    InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
+};
+use common_query::prelude::Signature;
+use datafusion::logical_expr::Volatility;
+use datatypes::value::{Value, ValueRef};
+use session::context::QueryContextRef;
+use snafu::ensure;
+use store_api::storage::ConcreteDataType;
+
+use crate::handlers::FlowServiceHandlerRef;
+use crate::helper::parse_catalog_flow;
+
+fn adjust_signature() -> Signature {
+    Signature::exact(
+        vec![
+            ConcreteDataType::string_datatype(), // flow name
+            ConcreteDataType::uint64_datatype(), // min_run_interval in seconds
+            ConcreteDataType::uint64_datatype(), // max filter number per query
+        ],
+        Volatility::Immutable,
+    )
+}
+
+#[admin_fn(
+    name = AdjustFlowFunction,
+    display_name = adjust_flow,
+    sig_fn = adjust_signature,
+    ret = uint64
+)]
+pub(crate) async fn adjust_flow(
+    flow_service_handler: &FlowServiceHandlerRef,
+    query_ctx: &QueryContextRef,
+    params: &[ValueRef<'_>],
+) -> Result<Value> {
+    ensure!(
+        params.len() == 3,
+        InvalidFuncArgsSnafu {
+            err_msg: format!(
+                "The length of the args is not correct, expect 3, have: {}",
+                params.len()
+            ),
+        }
+    );
+
+    let (flow_name, min_run_interval, max_filter_num) = match (params[0], params[1], params[2]) {
+        (
+            ValueRef::String(flow_name),
+            ValueRef::UInt64(min_run_interval),
+            ValueRef::UInt64(max_filter_num),
+        ) => (flow_name, min_run_interval, max_filter_num),
+        _ => {
+            return UnsupportedInputDataTypeSnafu {
+                function: "adjust_flow",
+                datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
+            }
+            .fail();
+        }
+    };
+
+    let (catalog_name, flow_name) = parse_catalog_flow(flow_name, query_ctx)?;
+
+    let res = flow_service_handler
+        .adjust(
+            &catalog_name,
+            &flow_name,
+            min_run_interval,
+            max_filter_num as usize,
+            query_ctx.clone(),
+        )
+        .await?;
+    let affected_rows = res.affected_rows;
+
+    Ok(Value::from(affected_rows))
+}

@@ -26,6 +26,7 @@ use flush_compact_table::{CompactTableFunction, FlushTableFunction};
 use migrate_region::MigrateRegionFunction;
 use remove_region_follower::RemoveRegionFollowerFunction;

+use crate::adjust_flow::AdjustFlowFunction;
 use crate::flush_flow::FlushFlowFunction;
 use crate::function_registry::FunctionRegistry;

@@ -43,5 +44,6 @@ impl AdminFunction {
        registry.register_async(Arc::new(FlushTableFunction));
        registry.register_async(Arc::new(CompactTableFunction));
        registry.register_async(Arc::new(FlushFlowFunction));
+        registry.register_async(Arc::new(AdjustFlowFunction));
    }
 }

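If `adjust_flow` follows the same convention as the `flush_flow` function registered beside it, it would be invoked as an admin statement taking the three arguments from `adjust_signature`, e.g. `ADMIN adjust_flow('my_flow', 10, 20)`: flow name, minimum run interval in seconds, and maximum filter number per query. The exact SQL surface is assumed here rather than shown in the diff.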
@@ -12,21 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use common_error::ext::BoxedError;
 use common_macro::admin_fn;
 use common_query::error::{
-    ExecuteSnafu, InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result,
-    UnsupportedInputDataTypeSnafu,
+    InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
 };
 use common_query::prelude::Signature;
 use datafusion::logical_expr::Volatility;
 use datatypes::value::{Value, ValueRef};
 use session::context::QueryContextRef;
-use snafu::{ensure, ResultExt};
-use sql::parser::ParserContext;
+use snafu::ensure;
 use store_api::storage::ConcreteDataType;

 use crate::handlers::FlowServiceHandlerRef;
+use crate::helper::parse_catalog_flow;

 fn flush_signature() -> Signature {
     Signature::uniform(

@@ -47,20 +45,6 @@ pub(crate) async fn flush_flow(
     query_ctx: &QueryContextRef,
     params: &[ValueRef<'_>],
 ) -> Result<Value> {
-    let (catalog_name, flow_name) = parse_flush_flow(params, query_ctx)?;
-
-    let res = flow_service_handler
-        .flush(&catalog_name, &flow_name, query_ctx.clone())
-        .await?;
-    let affected_rows = res.affected_rows;
-
-    Ok(Value::from(affected_rows))
-}
-
-fn parse_flush_flow(
-    params: &[ValueRef<'_>],
-    query_ctx: &QueryContextRef,
-) -> Result<(String, String)> {
     ensure!(
         params.len() == 1,
         InvalidFuncArgsSnafu {

@@ -70,7 +54,6 @@ fn parse_flush_flow(
             ),
         }
     );
-
     let ValueRef::String(flow_name) = params[0] else {
         return UnsupportedInputDataTypeSnafu {
             function: "flush_flow",

@@ -78,27 +61,14 @@ fn parse_flush_flow(
        }
        .fail();
    };
-    let obj_name = ParserContext::parse_table_name(flow_name, query_ctx.sql_dialect())
-        .map_err(BoxedError::new)
-        .context(ExecuteSnafu)?;
-
-    let (catalog_name, flow_name) = match &obj_name.0[..] {
-        [flow_name] => (
-            query_ctx.current_catalog().to_string(),
-            flow_name.value.clone(),
-        ),
-        [catalog, flow_name] => (catalog.value.clone(), flow_name.value.clone()),
-        _ => {
-            return InvalidFuncArgsSnafu {
-                err_msg: format!(
-                    "expect flow name to be <catalog>.<flow-name> or <flow-name>, actual: {}",
-                    obj_name
-                ),
-            }
-            .fail()
-        }
-    };
-    Ok((catalog_name, flow_name))
+    let (catalog_name, flow_name) = parse_catalog_flow(flow_name, query_ctx)?;
+
+    let res = flow_service_handler
+        .flush(&catalog_name, &flow_name, query_ctx.clone())
+        .await?;
+    let affected_rows = res.affected_rows;
+
+    Ok(Value::from(affected_rows))
 }

 #[cfg(test)]

@@ -154,10 +124,7 @@ mod test {
            ("catalog.flow_name", ("catalog", "flow_name")),
        ];
        for (input, expected) in testcases.iter() {
-            let args = vec![*input];
-            let args = args.into_iter().map(ValueRef::String).collect::<Vec<_>>();
-
-            let result = parse_flush_flow(&args, &QueryContext::arc()).unwrap();
+            let result = parse_catalog_flow(input, &QueryContext::arc()).unwrap();
            assert_eq!(*expected, (result.0.as_str(), result.1.as_str()));
        }
    }

@@ -39,7 +39,7 @@ use crate::system::SystemFunction;

 #[derive(Default)]
 pub struct FunctionRegistry {
-    scalar_functions: RwLock<HashMap<String, ScalarFunctionFactory>>,
+    functions: RwLock<HashMap<String, ScalarFunctionFactory>>,
     async_functions: RwLock<HashMap<String, AsyncFunctionRef>>,
     aggregate_functions: RwLock<HashMap<String, AggregateUDF>>,
 }

@@ -48,7 +48,7 @@ impl FunctionRegistry {
    pub fn register(&self, func: impl Into<ScalarFunctionFactory>) {
        let func = func.into();
        let _ = self
-            .scalar_functions
+            .functions
            .write()
            .unwrap()
            .insert(func.name().to_string(), func);

@@ -87,17 +87,13 @@ impl FunctionRegistry {
            .collect()
    }

-    pub fn get_scalar_function(&self, name: &str) -> Option<ScalarFunctionFactory> {
-        self.scalar_functions.read().unwrap().get(name).cloned()
+    #[cfg(test)]
+    pub fn get_function(&self, name: &str) -> Option<ScalarFunctionFactory> {
+        self.functions.read().unwrap().get(name).cloned()
    }

    pub fn scalar_functions(&self) -> Vec<ScalarFunctionFactory> {
-        self.scalar_functions
-            .read()
-            .unwrap()
-            .values()
-            .cloned()
-            .collect()
+        self.functions.read().unwrap().values().cloned().collect()
    }

    pub fn aggregate_functions(&self) -> Vec<AggregateUDF> {

@@ -148,11 +144,6 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
    // Approximate functions
    ApproximateFunction::register(&function_registry);

-    // PromQL aggregate functions
-    for aggr in promql::functions::aggr_funcs() {
-        function_registry.register_aggr(aggr);
-    }
-
    Arc::new(function_registry)
 });

@@ -165,10 +156,10 @@ mod tests {
    fn test_function_registry() {
        let registry = FunctionRegistry::default();

-        assert!(registry.get_scalar_function("test_and").is_none());
+        assert!(registry.get_function("test_and").is_none());
        assert!(registry.scalar_functions().is_empty());
        registry.register_scalar(TestAndFunction);
-        let _ = registry.get_scalar_function("test_and").unwrap();
+        let _ = registry.get_function("test_and").unwrap();
        assert_eq!(1, registry.scalar_functions().len());
    }
 }

@@ -87,6 +87,15 @@ pub trait FlowServiceHandler: Send + Sync {
        flow: &str,
        ctx: QueryContextRef,
    ) -> Result<api::v1::flow::FlowResponse>;
+
+    async fn adjust(
+        &self,
+        catalog: &str,
+        flow: &str,
+        min_run_interval_secs: u64,
+        max_filter_num_per_query: usize,
+        ctx: QueryContextRef,
+    ) -> Result<api::v1::flow::FlowResponse>;
 }

 pub type TableMutationHandlerRef = Arc<dyn TableMutationHandler>;

@@ -12,12 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use common_query::error::{InvalidInputTypeSnafu, Result};
+use common_error::ext::BoxedError;
+use common_query::error::{ExecuteSnafu, InvalidFuncArgsSnafu, InvalidInputTypeSnafu, Result};
 use common_query::prelude::{Signature, TypeSignature, Volatility};
 use datatypes::prelude::ConcreteDataType;
 use datatypes::types::cast::cast;
 use datatypes::value::ValueRef;
+use session::context::QueryContextRef;
 use snafu::ResultExt;
+use sql::parser::ParserContext;

 /// Create a function signature with oneof signatures of interleaving two arguments.
 pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>) -> Signature {

@@ -43,3 +46,30 @@ pub fn cast_u64(value: &ValueRef) -> Result<Option<u64>> {
    })
    .map(|v| v.as_u64())
 }
+
+pub fn parse_catalog_flow(
+    flow_name: &str,
+    query_ctx: &QueryContextRef,
+) -> Result<(String, String)> {
+    let obj_name = ParserContext::parse_table_name(flow_name, query_ctx.sql_dialect())
+        .map_err(BoxedError::new)
+        .context(ExecuteSnafu)?;
+
+    let (catalog_name, flow_name) = match &obj_name.0[..] {
+        [flow_name] => (
+            query_ctx.current_catalog().to_string(),
+            flow_name.value.clone(),
+        ),
+        [catalog, flow_name] => (catalog.value.clone(), flow_name.value.clone()),
+        _ => {
+            return InvalidFuncArgsSnafu {
+                err_msg: format!(
+                    "expect flow name to be <catalog>.<flow-name> or <flow-name>, actual: {}",
+                    obj_name
+                ),
+            }
+            .fail()
+        }
+    };
+    Ok((catalog_name, flow_name))
+}

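A usage sketch grounded in the unit test that moved along with this helper (see the `mod test` hunk above): a bare name resolves against the session's current catalog, while a qualified name splits into catalog and flow:

    use session::context::QueryContext;

    let ctx = QueryContext::arc();
    let (catalog, flow) = parse_catalog_flow("flow_name", &ctx).unwrap();
    assert_eq!(catalog, ctx.current_catalog()); // bare name → current catalog
    let (catalog, flow) = parse_catalog_flow("catalog.flow_name", &ctx).unwrap();
    assert_eq!((catalog.as_str(), flow.as_str()), ("catalog", "flow_name"));
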
@@ -15,6 +15,7 @@
 #![feature(let_chains)]
 #![feature(try_blocks)]

+mod adjust_flow;
 mod admin;
 mod flush_flow;
 mod macros;

@@ -148,6 +148,17 @@ impl FunctionState {
    ) -> Result<api::v1::flow::FlowResponse> {
        todo!()
    }
+
+    async fn adjust(
+        &self,
+        _catalog: &str,
+        _flow: &str,
+        _min_run_interval_secs: u64,
+        _max_filter_num_per_query: usize,
+        _ctx: QueryContextRef,
+    ) -> Result<api::v1::flow::FlowResponse> {
+        todo!()
+    }
 }

 Self {

@@ -296,6 +296,8 @@ pub struct ChannelConfig {
    pub max_recv_message_size: ReadableSize,
    // Max gRPC sending(encoding) message size
    pub max_send_message_size: ReadableSize,
+    pub send_compression: bool,
+    pub accept_compression: bool,
 }

 impl Default for ChannelConfig {

@@ -316,6 +318,8 @@ impl Default for ChannelConfig {
            client_tls: None,
            max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
            max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
+            send_compression: false,
+            accept_compression: false,
        }
    }
 }

@@ -566,6 +570,8 @@ mod tests {
                client_tls: None,
                max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
                max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
+                send_compression: false,
+                accept_compression: false,
            },
            default_cfg
        );

@@ -610,6 +616,8 @@ mod tests {
            }),
            max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
            max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
+            send_compression: false,
+            accept_compression: false,
            },
            cfg
        );

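Both new fields default to `false`, so wire compression stays opt-in and existing `ChannelConfig` literals keep compiling via `..Default::default()`. A sketch mirroring the frontend start path shown earlier (`opts` stands in for the loaded options):

    let mut cfg = ChannelConfig::default();
    assert!(!cfg.send_compression && !cfg.accept_compression); // opt-in by default
    // as in StartCommand: derive both flags from the configured flight mode
    if opts.grpc.flight_compression.transport_compression() {
        cfg.send_compression = true;
        cfg.accept_compression = true;
    }
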
@@ -64,6 +64,7 @@ impl Default for FlightEncoder {
 }

 impl FlightEncoder {
+    /// Creates new [FlightEncoder] with compression disabled.
    pub fn with_compression_disabled() -> Self {
        let write_options = writer::IpcWriteOptions::default()
            .try_with_compression(None)

@@ -15,7 +15,7 @@
 use std::sync::Arc;

 use api::region::RegionResponse;
-use api::v1::flow::{FlowRequest, FlowResponse};
+use api::v1::flow::{DirtyWindowRequest, FlowRequest, FlowResponse};
 use api::v1::region::{InsertRequests, RegionRequest};
 pub use common_base::AffectedRows;
 use common_query::request::QueryRequest;

@@ -42,6 +42,9 @@ pub trait Flownode: Send + Sync {
    async fn handle(&self, request: FlowRequest) -> Result<FlowResponse>;

    async fn handle_inserts(&self, request: InsertRequests) -> Result<FlowResponse>;
+
+    /// Handles requests to mark time window as dirty.
+    async fn handle_mark_window_dirty(&self, req: DirtyWindowRequest) -> Result<FlowResponse>;
 }

 pub type FlownodeRef = Arc<dyn Flownode>;

@@ -15,7 +15,7 @@
 use std::sync::Arc;

 use api::region::RegionResponse;
-use api::v1::flow::{FlowRequest, FlowResponse};
+use api::v1::flow::{DirtyWindowRequest, FlowRequest, FlowResponse};
 use api::v1::region::{InsertRequests, RegionRequest};
 pub use common_base::AffectedRows;
 use common_query::request::QueryRequest;

@@ -67,6 +67,14 @@ pub trait MockFlownodeHandler: Sync + Send + Clone {
    ) -> Result<FlowResponse> {
        unimplemented!()
    }
+
+    async fn handle_mark_window_dirty(
+        &self,
+        _peer: &Peer,
+        _req: DirtyWindowRequest,
+    ) -> Result<FlowResponse> {
+        unimplemented!()
+    }
 }

 /// A mock struct implements [NodeManager] only implement the `datanode` method.

@@ -134,6 +142,10 @@ impl<T: MockFlownodeHandler> Flownode for MockNode<T> {
    async fn handle_inserts(&self, requests: InsertRequests) -> Result<FlowResponse> {
        self.handler.handle_inserts(&self.peer, requests).await
    }
+
+    async fn handle_mark_window_dirty(&self, req: DirtyWindowRequest) -> Result<FlowResponse> {
+        self.handler.handle_mark_window_dirty(&self.peer, req).await
+    }
 }

 #[async_trait::async_trait]

@@ -372,6 +372,7 @@ impl DatanodeBuilder {
            opts.max_concurrent_queries,
            //TODO: revaluate the hardcoded timeout on the next version of datanode concurrency limiter.
            Duration::from_millis(100),
+            opts.grpc.flight_compression,
        );

        let object_store_manager = Self::build_object_store_manager(&opts.storage).await?;

@@ -50,6 +50,7 @@ use query::QueryEngineRef;
 use servers::error::{self as servers_error, ExecuteGrpcRequestSnafu, Result as ServerResult};
 use servers::grpc::flight::{FlightCraft, FlightRecordBatchStream, TonicStream};
 use servers::grpc::region_server::RegionServerHandler;
+use servers::grpc::FlightCompression;
 use session::context::{QueryContextBuilder, QueryContextRef};
 use snafu::{ensure, OptionExt, ResultExt};
 use store_api::metric_engine_consts::{

@@ -80,6 +81,7 @@ use crate::event_listener::RegionServerEventListenerRef;
 #[derive(Clone)]
 pub struct RegionServer {
     inner: Arc<RegionServerInner>,
+    flight_compression: FlightCompression,
 }

 pub struct RegionStat {

@@ -93,6 +95,7 @@ impl RegionServer {
        query_engine: QueryEngineRef,
        runtime: Runtime,
        event_listener: RegionServerEventListenerRef,
+        flight_compression: FlightCompression,
    ) -> Self {
        Self::with_table_provider(
            query_engine,

@@ -101,6 +104,7 @@ impl RegionServer {
            Arc::new(DummyTableProviderFactory),
            0,
            Duration::from_millis(0),
+            flight_compression,
        )
    }

@@ -111,6 +115,7 @@ impl RegionServer {
        table_provider_factory: TableProviderFactoryRef,
        max_concurrent_queries: usize,
        concurrent_query_limiter_timeout: Duration,
+        flight_compression: FlightCompression,
    ) -> Self {
        Self {
            inner: Arc::new(RegionServerInner::new(

@@ -123,6 +128,7 @@ impl RegionServer {
                concurrent_query_limiter_timeout,
                ),
            )),
+            flight_compression,
        }
    }

@@ -536,7 +542,11 @@ impl FlightCraft for RegionServer {
            .trace(tracing_context.attach(info_span!("RegionServer::handle_read")))
            .await?;

-        let stream = Box::pin(FlightRecordBatchStream::new(result, tracing_context));
+        let stream = Box::pin(FlightRecordBatchStream::new(
+            result,
+            tracing_context,
+            self.flight_compression,
+        ));
        Ok(Response::new(stream))
    }
 }

@@ -28,6 +28,7 @@ use query::dataframe::DataFrame;
 use query::planner::LogicalPlanner;
 use query::query_engine::{DescribeResult, QueryEngineState};
 use query::{QueryEngine, QueryEngineContext};
+use servers::grpc::FlightCompression;
 use session::context::QueryContextRef;
 use store_api::metadata::RegionMetadataRef;
 use store_api::region_engine::{

@@ -97,6 +98,7 @@ pub fn mock_region_server() -> RegionServer {
        Arc::new(MockQueryEngine),
        Runtime::builder().build().unwrap(),
        Box::new(NoopRegionServerEventListener),
+        FlightCompression::default(),
    )
 }

@@ -61,6 +61,7 @@ prost.workspace = true
 query.workspace = true
 rand.workspace = true
 serde.workspace = true
+serde_json.workspace = true
 servers.workspace = true
 session.workspace = true
 smallvec.workspace = true

@@ -316,7 +316,7 @@ impl StreamingEngine {
        );

        METRIC_FLOW_ROWS
-            .with_label_values(&["out"])
+            .with_label_values(&["out-streaming"])
            .inc_by(total_rows as u64);

        let now = self.tick_manager.tick();

@@ -18,7 +18,7 @@ use std::sync::atomic::AtomicBool;
 use std::sync::Arc;

 use api::v1::flow::{
-    flow_request, CreateRequest, DropRequest, FlowRequest, FlowResponse, FlushFlow,
+    flow_request, AdjustFlow, CreateRequest, DropRequest, FlowRequest, FlowResponse, FlushFlow,
 };
 use api::v1::region::InsertRequests;
 use catalog::CatalogManager;

@@ -31,7 +31,9 @@ use common_runtime::JoinHandle;
 use common_telemetry::{error, info, trace, warn};
 use datatypes::value::Value;
 use futures::TryStreamExt;
+use greptime_proto::v1::flow::DirtyWindowRequest;
 use itertools::Itertools;
+use serde::{Deserialize, Serialize};
 use session::context::QueryContextBuilder;
 use snafu::{ensure, IntoError, OptionExt, ResultExt};
 use store_api::storage::{RegionId, TableId};

@@ -46,7 +48,7 @@ use crate::error::{
     IllegalCheckTaskStateSnafu, InsertIntoFlowSnafu, InternalSnafu, JoinTaskSnafu, ListFlowsSnafu,
     NoAvailableFrontendSnafu, SyncCheckTaskSnafu, UnexpectedSnafu,
 };
-use crate::metrics::METRIC_FLOW_TASK_COUNT;
+use crate::metrics::{METRIC_FLOW_ROWS, METRIC_FLOW_TASK_COUNT};
 use crate::repr::{self, DiffRow};
 use crate::{Error, FlowId};

@@ -689,6 +691,9 @@ impl FlowEngine for FlowDualEngine {
        let mut to_stream_engine = Vec::with_capacity(request.requests.len());
        let mut to_batch_engine = request.requests;

+        let mut batching_row_cnt = 0;
+        let mut streaming_row_cnt = 0;
+
        {
            // not locking this, or recover flows will be starved when also handling flow inserts
            let src_table2flow = self.src_table2flow.read().await;

@@ -698,9 +703,11 @@ impl FlowEngine for FlowDualEngine {
                let is_in_stream = src_table2flow.in_stream(table_id);
                let is_in_batch = src_table2flow.in_batch(table_id);
                if is_in_stream {
+                    streaming_row_cnt += req.rows.as_ref().map(|rs| rs.rows.len()).unwrap_or(0);
                    to_stream_engine.push(req.clone());
                }
                if is_in_batch {
+                    batching_row_cnt += req.rows.as_ref().map(|rs| rs.rows.len()).unwrap_or(0);
                    return true;
                }
                if !is_in_batch && !is_in_stream {

|
|||||||
// can't use drop due to https://github.com/rust-lang/rust/pull/128846
|
// can't use drop due to https://github.com/rust-lang/rust/pull/128846
|
||||||
}
|
}
|
||||||
|
|
||||||
|
METRIC_FLOW_ROWS
|
||||||
|
.with_label_values(&["in-streaming"])
|
||||||
|
.inc_by(streaming_row_cnt as u64);
|
||||||
|
|
||||||
|
METRIC_FLOW_ROWS
|
||||||
|
.with_label_values(&["in-batching"])
|
||||||
|
.inc_by(batching_row_cnt as u64);
|
||||||
|
|
||||||
let streaming_engine = self.streaming_engine.clone();
|
let streaming_engine = self.streaming_engine.clone();
|
||||||
let stream_handler: JoinHandle<Result<(), Error>> =
|
let stream_handler: JoinHandle<Result<(), Error>> =
|
||||||
common_runtime::spawn_global(async move {
|
common_runtime::spawn_global(async move {
|
||||||
@@ -809,6 +824,25 @@ impl common_meta::node_manager::Flownode for FlowDualEngine {
                    ..Default::default()
                })
            }
+            Some(flow_request::Body::Adjust(AdjustFlow { flow_id, options })) => {
+                #[derive(Debug, Serialize, Deserialize)]
+                struct Options {
+                    min_run_interval_secs: u64,
+                    max_filter_num_per_query: usize,
+                }
+                let options: Options = serde_json::from_str(&options).with_context(|_| {
+                    common_meta::error::DeserializeFromJsonSnafu { input: options }
+                })?;
+                self.batching_engine
+                    .adjust_flow(
+                        flow_id.unwrap().id as u64,
+                        options.min_run_interval_secs,
+                        options.max_filter_num_per_query,
+                    )
+                    .await
+                    .map_err(to_meta_err(snafu::location!()))?;
+                Ok(Default::default())
+            }
            other => common_meta::error::InvalidFlowRequestBodySnafu { body: other }.fail(),
        }
    }

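The `AdjustFlow.options` field is a JSON document deserialized into the locally declared `Options` struct; an illustrative payload (field names from the diff, values invented):

    let options = r#"{"min_run_interval_secs": 60, "max_filter_num_per_query": 20}"#;
    let parsed: Options = serde_json::from_str(options).unwrap();
    assert_eq!(parsed.min_run_interval_secs, 60);
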
@@ -819,6 +853,11 @@ impl common_meta::node_manager::Flownode for FlowDualEngine {
            .map(|_| Default::default())
            .map_err(to_meta_err(snafu::location!()))
    }
+
+    async fn handle_mark_window_dirty(&self, _req: DirtyWindowRequest) -> MetaResult<FlowResponse> {
+        // todo: implement
+        unimplemented!()
+    }
 }

 /// return a function to convert `crate::error::Error` to `common_meta::error::Error`

@@ -926,6 +965,11 @@ impl common_meta::node_manager::Flownode for StreamingEngine {
            .map(|_| Default::default())
            .map_err(to_meta_err(snafu::location!()))
    }
+
+    async fn handle_mark_window_dirty(&self, _req: DirtyWindowRequest) -> MetaResult<FlowResponse> {
+        // todo: implement
+        unimplemented!()
+    }
 }

 impl FlowEngine for StreamingEngine {

@@ -17,6 +17,7 @@
 use std::collections::{BTreeMap, HashMap};
 use std::sync::Arc;

+use api::v1::flow::{DirtyWindowRequests, FlowResponse};
 use catalog::CatalogManagerRef;
 use common_error::ext::BoxedError;
 use common_meta::ddl::create_flow::FlowType;

@@ -29,8 +30,7 @@ use common_telemetry::{debug, info};
 use common_time::TimeToLive;
 use query::QueryEngineRef;
 use snafu::{ensure, OptionExt, ResultExt};
-use store_api::storage::RegionId;
-use table::metadata::TableId;
+use store_api::storage::{RegionId, TableId};
 use tokio::sync::{oneshot, RwLock};

 use crate::batching_mode::frontend_client::FrontendClient;

@@ -42,6 +42,7 @@ use crate::error::{
     ExternalSnafu, FlowAlreadyExistSnafu, FlowNotFoundSnafu, TableNotFoundMetaSnafu,
     UnexpectedSnafu, UnsupportedSnafu,
 };
+use crate::metrics::METRIC_FLOW_BATCHING_ENGINE_BULK_MARK_TIME_WINDOW_RANGE;
 use crate::{CreateFlowArgs, Error, FlowId, TableName};

 /// Batching mode Engine, responsible for driving all the batching mode tasks

@@ -77,6 +78,122 @@ impl BatchingEngine {
        }
    }

+    pub async fn handle_mark_dirty_time_window(
+        &self,
+        reqs: DirtyWindowRequests,
+    ) -> Result<FlowResponse, Error> {
+        let table_info_mgr = self.table_meta.table_info_manager();
+
+        let mut group_by_table_id: HashMap<u32, Vec<_>> = HashMap::new();
+        for r in reqs.requests {
+            let tid = TableId::from(r.table_id);
+            let entry = group_by_table_id.entry(tid).or_default();
+            entry.extend(r.dirty_time_ranges);
+        }
+        let tids = group_by_table_id.keys().cloned().collect::<Vec<TableId>>();
+        let table_infos =
+            table_info_mgr
+                .batch_get(&tids)
+                .await
+                .with_context(|_| TableNotFoundMetaSnafu {
+                    msg: format!("Failed to get table info for table ids: {:?}", tids),
+                })?;
+
+        let group_by_table_name = group_by_table_id
+            .into_iter()
+            .filter_map(|(id, rows)| {
+                let table_name = table_infos.get(&id).map(|info| info.table_name());
+                let Some(table_name) = table_name else {
+                    warn!("Failed to get table infos for table id: {:?}", id);
+                    return None;
+                };
+                let table_name = [
+                    table_name.catalog_name,
+                    table_name.schema_name,
+                    table_name.table_name,
+                ];
+                let schema = &table_infos.get(&id).unwrap().table_info.meta.schema;
+                let time_index_unit = schema.column_schemas[schema.timestamp_index.unwrap()]
+                    .data_type
+                    .as_timestamp()
+                    .unwrap()
+                    .unit();
+                Some((table_name, (rows, time_index_unit)))
+            })
+            .collect::<HashMap<_, _>>();
+
+        let group_by_table_name = Arc::new(group_by_table_name);
+
+        let mut handles = Vec::new();
+        let tasks = self.tasks.read().await;
+
+        for (_flow_id, task) in tasks.iter() {
+            let src_table_names = &task.config.source_table_names;
+
+            if src_table_names
+                .iter()
+                .all(|name| !group_by_table_name.contains_key(name))
+            {
+                continue;
+            }
+
+            let group_by_table_name = group_by_table_name.clone();
+            let task = task.clone();
+
+            let handle: JoinHandle<Result<(), Error>> = tokio::spawn(async move {
+                let src_table_names = &task.config.source_table_names;
+                let mut all_dirty_windows = vec![];
+                for src_table_name in src_table_names {
+                    if let Some((window_ranges, unit)) = group_by_table_name.get(src_table_name) {
+                        let Some(expr) = &task.config.time_window_expr else {
+                            continue;
+                        };
+                        for window in window_ranges {
+                            let align_start = expr
+                                .eval(common_time::Timestamp::new(window.start_value, *unit))?
+                                .0
+                                .context(UnexpectedSnafu {
+                                    reason: "Failed to eval start value",
+                                })?;
+
+                            let align_end = expr
+                                .eval(common_time::Timestamp::new(window.end_value, *unit))?
+                                .1
+                                .context(UnexpectedSnafu {
+                                    reason: "Failed to eval end value",
+                                })?;
+                            all_dirty_windows.push((align_start, align_end));
+                        }
+                    }
+                }
+                let mut state = task.state.write().unwrap();
+                let flow_id_label = task.config.flow_id.to_string();
+                for (s, e) in all_dirty_windows {
+                    METRIC_FLOW_BATCHING_ENGINE_BULK_MARK_TIME_WINDOW_RANGE
+                        .with_label_values(&[&flow_id_label])
+                        .observe(e.sub(&s).unwrap_or_default().num_seconds() as f64);
+                    state.dirty_time_windows.add_window(s, Some(e));
+                }
+                Ok(())
+            });
+            handles.push(handle);
+        }
+        drop(tasks);
+        for handle in handles {
+            match handle.await {
+                Err(e) => {
+                    warn!("Failed to handle inserts: {e}");
+                }
+                Ok(Ok(())) => (),
+                Ok(Err(e)) => {
+                    warn!("Failed to handle inserts: {e}");
+                }
+            }
+        }
+
+        Ok(Default::default())
+    }
+
    pub async fn handle_inserts_inner(
        &self,
        request: api::v1::region::InsertRequests,

@@ -388,6 +505,20 @@ impl BatchingEngine {
|
|||||||
pub async fn flow_exist_inner(&self, flow_id: FlowId) -> bool {
|
pub async fn flow_exist_inner(&self, flow_id: FlowId) -> bool {
|
||||||
self.tasks.read().await.contains_key(&flow_id)
|
self.tasks.read().await.contains_key(&flow_id)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn adjust_flow(
|
||||||
|
&self,
|
||||||
|
flow_id: FlowId,
|
||||||
|
min_run_interval_secs: u64,
|
||||||
|
max_filter_num_per_query: usize,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let task = self.tasks.read().await.get(&flow_id).cloned();
|
||||||
|
let task = task.with_context(|| FlowNotFoundSnafu { id: flow_id })?;
|
||||||
|
debug!("Adjusting flow {flow_id} with min_run_interval_secs={} and max_filter_num_per_query={}", min_run_interval_secs, max_filter_num_per_query);
|
||||||
|
task.adjust(min_run_interval_secs, max_filter_num_per_query);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FlowEngine for BatchingEngine {
|
impl FlowEngine for BatchingEngine {
|
||||||
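The block above is the heart of the new bulk dirty-window path: each (start, end) range reported for a source table is aligned outward through the flow's time-window expression before being recorded as dirty. Below is a minimal, self-contained sketch of that alignment idea, assuming second-precision timestamps and a fixed window size instead of the engine's `TimeWindowExpr`:

```rust
// Snap a raw [start, end] timestamp range outward to enclosing window
// boundaries, so the dirty range always covers whole time windows.
// `window_size_s` and second-precision i64 timestamps are illustrative
// assumptions, not the engine's actual types.
fn align_to_windows(start_s: i64, end_s: i64, window_size_s: i64) -> (i64, i64) {
    let align_start = start_s.div_euclid(window_size_s) * window_size_s;
    let align_end = (end_s.div_euclid(window_size_s) + 1) * window_size_s;
    (align_start, align_end)
}

fn main() {
    // Rows landed between t=65s and t=130s; with 60s windows the dirty
    // range becomes [60, 180).
    assert_eq!(align_to_windows(65, 130, 60), (60, 180));
}
```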
@@ -14,8 +14,9 @@
 //! Frontend client to run flow as batching task which is time-window-aware normal query triggered every tick set by user

-use std::sync::{Arc, Weak};
-use std::time::SystemTime;
+use std::collections::HashMap;
+use std::sync::{Arc, Mutex, Weak};
+use std::time::{Duration, Instant, SystemTime};

 use api::v1::greptime_request::Request;
 use api::v1::CreateTableExpr;
@@ -26,20 +27,21 @@ use common_meta::cluster::{NodeInfo, NodeInfoKey, Role};
 use common_meta::peer::Peer;
 use common_meta::rpc::store::RangeRequest;
 use common_query::Output;
-use common_telemetry::warn;
+use common_telemetry::{debug, warn};
+use itertools::Itertools;
 use meta_client::client::MetaClient;
-use rand::rng;
-use rand::seq::SliceRandom;
 use servers::query_handler::grpc::GrpcQueryHandler;
 use session::context::{QueryContextBuilder, QueryContextRef};
 use snafu::{OptionExt, ResultExt};

+use crate::batching_mode::task::BatchingTask;
 use crate::batching_mode::{
     DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, FRONTEND_ACTIVITY_TIMEOUT, GRPC_CONN_TIMEOUT,
     GRPC_MAX_RETRIES,
 };
 use crate::error::{ExternalSnafu, InvalidRequestSnafu, NoAvailableFrontendSnafu, UnexpectedSnafu};
-use crate::{Error, FlowAuthHeader};
+use crate::metrics::METRIC_FLOW_BATCHING_ENGINE_GUESS_FE_LOAD;
+use crate::{Error, FlowAuthHeader, FlowId};

 /// Just like [`GrpcQueryHandler`] but use BoxedError
 ///
@@ -74,6 +76,105 @@ impl<

 type HandlerMutable = Arc<std::sync::Mutex<Option<Weak<dyn GrpcQueryHandlerWithBoxedError>>>>;

+/// Statistics about running query on this frontend from flownode
+#[derive(Debug, Default, Clone)]
+struct FrontendStat {
+    /// The query for flow id has been running since this timestamp
+    since: HashMap<FlowId, Instant>,
+    /// The average query time for each flow id
+    /// This is used to calculate the average query time for each flow id
+    past_query_avg: HashMap<FlowId, (usize, Duration)>,
+}
+
+#[derive(Debug, Default, Clone)]
+pub struct FrontendStats {
+    /// The statistics for each flow id
+    stats: Arc<Mutex<HashMap<String, FrontendStat>>>,
+}
+
+impl FrontendStats {
+    pub fn observe(&self, frontend_addr: &str, flow_id: FlowId) -> FrontendStatsGuard {
+        let mut stats = self.stats.lock().expect("Failed to lock frontend stats");
+        let stat = stats.entry(frontend_addr.to_string()).or_default();
+        stat.since.insert(flow_id, Instant::now());
+
+        FrontendStatsGuard {
+            stats: self.stats.clone(),
+            frontend_addr: frontend_addr.to_string(),
+            cur: flow_id,
+        }
+    }
+
+    /// return frontend addrs sorted by load, from lightest to heaviest
+    /// The load is calculated as the total average query time for each flow id plus running query's total running time elapsed
+    pub fn sort_by_load(&self) -> Vec<String> {
+        let stats = self.stats.lock().expect("Failed to lock frontend stats");
+        let fe_load_factor = stats
+            .iter()
+            .map(|(node_addr, stat)| {
+                // total expected avg running time for all currently running queries
+                let total_expect_avg_run_time = stat
+                    .since
+                    .keys()
+                    .map(|f| {
+                        let (count, total_duration) =
+                            stat.past_query_avg.get(f).unwrap_or(&(0, Duration::ZERO));
+                        if *count == 0 {
+                            0.0
+                        } else {
+                            total_duration.as_secs_f64() / *count as f64
+                        }
+                    })
+                    .sum::<f64>();
+                let total_cur_running_time = stat
+                    .since
+                    .values()
+                    .map(|since| since.elapsed().as_secs_f64())
+                    .sum::<f64>();
+                (
+                    node_addr.to_string(),
+                    total_expect_avg_run_time + total_cur_running_time,
+                )
+            })
+            .sorted_by(|(_, load_a), (_, load_b)| {
+                load_a
+                    .partial_cmp(load_b)
+                    .unwrap_or(std::cmp::Ordering::Equal)
+            })
+            .collect::<Vec<_>>();
+        debug!("Frontend load factor: {:?}", fe_load_factor);
+        for (node_addr, load) in &fe_load_factor {
+            METRIC_FLOW_BATCHING_ENGINE_GUESS_FE_LOAD
+                .with_label_values(&[&node_addr.to_string()])
+                .observe(*load);
+        }
+        fe_load_factor
+            .into_iter()
+            .map(|(addr, _)| addr)
+            .collect::<Vec<_>>()
+    }
+}
+
+pub struct FrontendStatsGuard {
+    stats: Arc<Mutex<HashMap<String, FrontendStat>>>,
+    frontend_addr: String,
+    cur: FlowId,
+}
+
+impl Drop for FrontendStatsGuard {
+    fn drop(&mut self) {
+        let mut stats = self.stats.lock().expect("Failed to lock frontend stats");
+        if let Some(stat) = stats.get_mut(&self.frontend_addr) {
+            if let Some(since) = stat.since.remove(&self.cur) {
+                let elapsed = since.elapsed();
+                let (count, total_duration) = stat.past_query_avg.entry(self.cur).or_default();
+                *count += 1;
+                *total_duration += elapsed;
+            }
+        }
+    }
+}
+
 /// A simple frontend client able to execute sql using grpc protocol
 ///
 /// This is for computation-heavy query which need to offload computation to frontend, lifting the load from flownode
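`FrontendStats` above relies on an RAII guard: `observe()` records a start time, and the guard's `Drop` folds the elapsed time into a running (count, total) pair, so accounting stays correct even when the query path returns early or errors. A self-contained sketch of the same pattern, with `Stats` and `Guard` as illustrative stand-ins for the crate's types:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

#[derive(Default)]
struct Stats {
    running: HashMap<u32, Instant>,          // query start times, keyed by flow id
    totals: HashMap<u32, (usize, Duration)>, // (count, total elapsed) per flow id
}

struct Guard {
    stats: Arc<Mutex<Stats>>,
    id: u32,
}

impl Drop for Guard {
    fn drop(&mut self) {
        let mut stats = self.stats.lock().unwrap();
        if let Some(since) = stats.running.remove(&self.id) {
            let (count, total) = stats.totals.entry(self.id).or_default();
            *count += 1;
            *total += since.elapsed();
        }
    }
}

fn observe(stats: &Arc<Mutex<Stats>>, id: u32) -> Guard {
    stats.lock().unwrap().running.insert(id, Instant::now());
    Guard { stats: stats.clone(), id }
}

fn main() {
    let stats = Arc::new(Mutex::new(Stats::default()));
    {
        let _g = observe(&stats, 1);
        // ... query runs here; the guard updates the totals on drop
    }
    assert_eq!(stats.lock().unwrap().totals[&1].0, 1);
}
```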
@@ -83,6 +184,7 @@ pub enum FrontendClient {
         meta_client: Arc<MetaClient>,
         chnl_mgr: ChannelManager,
         auth: Option<FlowAuthHeader>,
+        fe_stats: FrontendStats,
     },
     Standalone {
         /// for the sake of simplicity still use grpc even in standalone mode
@@ -114,6 +216,7 @@ impl FrontendClient {
                 ChannelManager::with_config(cfg)
             },
             auth,
+            fe_stats: Default::default(),
         }
     }

@@ -192,6 +295,7 @@ impl FrontendClient {
             meta_client: _,
             chnl_mgr,
             auth,
+            fe_stats,
         } = self
         else {
             return UnexpectedSnafu {
@@ -208,8 +312,21 @@ impl FrontendClient {
             .duration_since(SystemTime::UNIX_EPOCH)
             .unwrap()
             .as_millis() as i64;
-        // shuffle the frontends to avoid always pick the same one
-        frontends.shuffle(&mut rng());
+        let node_addrs_by_load = fe_stats.sort_by_load();
+        // index+1 to load order asc, so that the lightest node has load 1 and non-existent node has load 0
+        let addr2load = node_addrs_by_load
+            .iter()
+            .enumerate()
+            .map(|(i, id)| (id.clone(), i + 1))
+            .collect::<HashMap<_, _>>();
+        // sort frontends by load, from lightest to heaviest
+        frontends.sort_by(|(_, a), (_, b)| {
+            // if not even in stats, treat as 0 load since never been queried
+            let load_a = addr2load.get(&a.peer.addr).unwrap_or(&0);
+            let load_b = addr2load.get(&b.peer.addr).unwrap_or(&0);
+            load_a.cmp(load_b)
+        });
+        debug!("Frontend nodes sorted by load: {:?}", frontends);
+
         // found node with maximum last_activity_ts
         for (_, node_info) in frontends
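The replacement of the random shuffle with load-aware ordering above uses a small ranking trick: addresses from the load-sorted list get rank index+1, and an address with no stats yet falls back to rank 0, so it is tried first. A runnable sketch of just that ordering, with hypothetical addresses:

```rust
use std::collections::HashMap;

fn main() {
    // lightest first, as returned by a sort_by_load-style call
    let by_load = vec!["fe-b:4001".to_string(), "fe-a:4001".to_string()];
    let addr2load: HashMap<_, _> = by_load
        .iter()
        .enumerate()
        .map(|(i, addr)| (addr.clone(), i + 1))
        .collect();

    let mut frontends = vec!["fe-a:4001", "fe-b:4001", "fe-new:4001"];
    frontends.sort_by_key(|addr| *addr2load.get(*addr).unwrap_or(&0));
    // never-queried node first (rank 0), then by ascending load
    assert_eq!(frontends, vec!["fe-new:4001", "fe-b:4001", "fe-a:4001"]);
}
```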
@@ -257,6 +374,7 @@ impl FrontendClient {
         create: CreateTableExpr,
         catalog: &str,
         schema: &str,
+        task: Option<&BatchingTask>,
     ) -> Result<u32, Error> {
         self.handle(
             Request::Ddl(api::v1::DdlRequest {
@@ -265,6 +383,7 @@ impl FrontendClient {
             catalog,
             schema,
             &mut None,
+            task,
         )
         .await
     }
@@ -276,15 +395,19 @@ impl FrontendClient {
         catalog: &str,
         schema: &str,
         peer_desc: &mut Option<PeerDesc>,
+        task: Option<&BatchingTask>,
     ) -> Result<u32, Error> {
         match self {
-            FrontendClient::Distributed { .. } => {
+            FrontendClient::Distributed { fe_stats, .. } => {
                 let db = self.get_random_active_frontend(catalog, schema).await?;

                 *peer_desc = Some(PeerDesc::Dist {
                     peer: db.peer.clone(),
                 });

+                let flow_id = task.map(|t| t.config.flow_id).unwrap_or_default();
+                let _guard = fe_stats.observe(&db.peer.addr, flow_id);
+
                 db.database
                     .handle_with_retry(req.clone(), GRPC_MAX_RETRIES)
                     .await
@@ -31,7 +31,9 @@ use crate::batching_mode::time_window::TimeWindowExpr;
 use crate::batching_mode::MIN_REFRESH_DURATION;
 use crate::error::{DatatypesSnafu, InternalSnafu, TimeSnafu, UnexpectedSnafu};
 use crate::metrics::{
-    METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME_RANGE, METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_CNT,
+    METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_CNT, METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_SIZE,
+    METRIC_FLOW_BATCHING_ENGINE_STALLED_QUERY_WINDOW_CNT,
+    METRIC_FLOW_BATCHING_ENGINE_STALLED_WINDOW_SIZE,
 };
 use crate::{Error, FlowId};

@@ -52,6 +54,11 @@ pub struct TaskState {
     pub(crate) shutdown_rx: oneshot::Receiver<()>,
     /// Task handle
     pub(crate) task_handle: Option<tokio::task::JoinHandle<()>>,
+
+    /// min run interval in seconds
+    pub(crate) min_run_interval: Option<u64>,
+    /// max filter number per query
+    pub(crate) max_filter_num: Option<usize>,
 }
 impl TaskState {
     pub fn new(query_ctx: QueryContextRef, shutdown_rx: oneshot::Receiver<()>) -> Self {
@@ -63,6 +70,8 @@ impl TaskState {
             exec_state: ExecState::Idle,
             shutdown_rx,
             task_handle: None,
+            min_run_interval: None,
+            max_filter_num: None,
         }
     }

@@ -86,24 +95,21 @@ impl TaskState {
     /// TODO: Make this behavior configurable.
     pub fn get_next_start_query_time(
         &self,
-        flow_id: FlowId,
-        time_window_size: &Option<Duration>,
+        _flow_id: FlowId,
+        _time_window_size: &Option<Duration>,
         max_timeout: Option<Duration>,
     ) -> Instant {
-        let last_duration = max_timeout
+        let next_duration = max_timeout
             .unwrap_or(self.last_query_duration)
             .min(self.last_query_duration)
-            .max(MIN_REFRESH_DURATION);
-
-        let next_duration = time_window_size
-            .map(|t| {
-                let half = t / 2;
-                half.max(last_duration)
-            })
-            .unwrap_or(last_duration);
+            .max(
+                self.min_run_interval
+                    .map(Duration::from_secs)
+                    .unwrap_or(MIN_REFRESH_DURATION),
+            );

         // if have dirty time window, execute immediately to clean dirty time window
-        if self.dirty_time_windows.windows.is_empty() {
+        /*if self.dirty_time_windows.windows.is_empty() {
             self.last_update_time + next_duration
         } else {
             debug!(
@@ -113,7 +119,10 @@ impl TaskState {
                 self.dirty_time_windows.windows
            );
             Instant::now()
-        }
+        }*/
+
+        // wait for next duration anyway
+        self.last_update_time + next_duration
     }
 }
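After this change the scheduler no longer fires immediately when dirty windows exist; the wait is the last query duration, capped by the optional max timeout and floored by the per-flow minimum interval (falling back to an engine-wide minimum). A sketch of that rule, assuming a 5 s engine minimum, which is illustrative rather than the crate's actual `MIN_REFRESH_DURATION`:

```rust
use std::time::Duration;

const MIN_REFRESH: Duration = Duration::from_secs(5); // illustrative default

fn next_wait(
    last_query: Duration,
    max_timeout: Option<Duration>,
    min_run_interval: Option<u64>,
) -> Duration {
    max_timeout
        .unwrap_or(last_query)
        .min(last_query)
        .max(min_run_interval.map(Duration::from_secs).unwrap_or(MIN_REFRESH))
}

fn main() {
    // a fast 1s query still waits out the 10s per-flow minimum
    assert_eq!(
        next_wait(Duration::from_secs(1), None, Some(10)),
        Duration::from_secs(10)
    );
    // a slow 60s query is capped by the 30s max timeout
    assert_eq!(
        next_wait(Duration::from_secs(60), Some(Duration::from_secs(30)), None),
        Duration::from_secs(30)
    );
}
```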
@@ -206,47 +215,63 @@ impl DirtyTimeWindows {

         // get the first `window_cnt` time windows
         let max_time_range = window_size * window_cnt as i32;
-        let nth = {
-            let mut cur_time_range = chrono::Duration::zero();
-            let mut nth_key = None;
-            for (idx, (start, end)) in self.windows.iter().enumerate() {
-                // if time range is too long, stop
-                if cur_time_range > max_time_range {
-                    nth_key = Some(*start);
-                    break;
-                }
-
-                // if we have enough time windows, stop
-                if idx >= window_cnt {
-                    nth_key = Some(*start);
-                    break;
-                }
-
-                if let Some(end) = end {
-                    if let Some(x) = end.sub(start) {
-                        cur_time_range += x;
-                    }
-                }
-            }
-
-            nth_key
-        };
-
-        let first_nth = {
-            if let Some(nth) = nth {
-                let mut after = self.windows.split_off(&nth);
-                std::mem::swap(&mut self.windows, &mut after);
-
-                after
-            } else {
-                std::mem::take(&mut self.windows)
-            }
-        };
+        let mut to_be_query = BTreeMap::new();
+        let mut new_windows = self.windows.clone();
+        let mut cur_time_range = chrono::Duration::zero();
+        for (idx, (start, end)) in self.windows.iter().enumerate() {
+            let first_end = start
+                .add_duration(window_size.to_std().unwrap())
+                .context(TimeSnafu)?;
+            let end = end.unwrap_or(first_end);
+
+            // if time range is too long, stop
+            if cur_time_range >= max_time_range {
+                break;
+            }
+
+            // if we have enough time windows, stop
+            if idx >= window_cnt {
+                break;
+            }
+
+            if let Some(x) = end.sub(start) {
+                if cur_time_range + x <= max_time_range {
+                    to_be_query.insert(*start, Some(end));
+                    new_windows.remove(start);
+                    cur_time_range += x;
+                } else {
+                    // too large a window, split it
+                    // split at window_size * times
+                    let surplus = max_time_range - cur_time_range;
+                    let times = surplus.num_seconds() / window_size.num_seconds();

+                    let split_offset = window_size * times as i32;
+                    let split_at = start
+                        .add_duration(split_offset.to_std().unwrap())
+                        .context(TimeSnafu)?;
+                    to_be_query.insert(*start, Some(split_at));
+
+                    // remove the original window
+                    new_windows.remove(start);
+                    new_windows.insert(split_at, Some(end));
+                    cur_time_range += split_offset;
+                    break;
+                }
+            }
+        }
+
+        self.windows = new_windows;

         METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_CNT
             .with_label_values(&[flow_id.to_string().as_str()])
-            .observe(first_nth.len() as f64);
+            .observe(to_be_query.len() as f64);

-        let full_time_range = first_nth
+        METRIC_FLOW_BATCHING_ENGINE_STALLED_QUERY_WINDOW_CNT
+            .with_label_values(&[flow_id.to_string().as_str()])
+            .observe(self.windows.len() as f64);
+
+        let full_time_range = to_be_query
             .iter()
             .fold(chrono::Duration::zero(), |acc, (start, end)| {
                 if let Some(end) = end {
@@ -256,12 +281,27 @@ impl DirtyTimeWindows {
                 }
             })
             .num_seconds() as f64;
-        METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME_RANGE
+        METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_SIZE
             .with_label_values(&[flow_id.to_string().as_str()])
             .observe(full_time_range);

+        let stalled_time_range =
+            self.windows
+                .iter()
+                .fold(chrono::Duration::zero(), |acc, (start, end)| {
+                    if let Some(end) = end {
+                        acc + end.sub(start).unwrap_or(chrono::Duration::zero())
+                    } else {
+                        acc
+                    }
+                });
+
+        METRIC_FLOW_BATCHING_ENGINE_STALLED_WINDOW_SIZE
+            .with_label_values(&[flow_id.to_string().as_str()])
+            .observe(stalled_time_range.num_seconds() as f64);
+
         let mut expr_lst = vec![];
-        for (start, end) in first_nth.into_iter() {
+        for (start, end) in to_be_query.into_iter() {
             // align using time window exprs
             let (start, end) = if let Some(ctx) = task_ctx {
                 let Some(time_window_expr) = &ctx.config.time_window_expr else {
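The rewritten loop above replaces the old `split_off`-based prefix take: windows are consumed in order until the queried span reaches a budget of window_size × window_cnt, and an oversized window is split at a whole multiple of the window size so its head is queried now while the tail stays stalled. A self-contained sketch of that budgeting, using plain second-granularity integers instead of `Timestamp` and `BTreeMap`:

```rust
// Take windows up to a total budget; split the window that would overflow it.
fn take_windows(
    windows: &mut Vec<(i64, i64)>, // sorted (start, end) pairs in seconds
    window_size: i64,
    window_cnt: usize,
) -> Vec<(i64, i64)> {
    let budget = window_size * window_cnt as i64;
    let mut used = 0i64;
    let mut picked = Vec::new();
    let mut rest = Vec::new();
    for (idx, (start, end)) in windows.drain(..).enumerate() {
        let len = end - start;
        if used >= budget || idx >= window_cnt {
            rest.push((start, end));
        } else if used + len <= budget {
            used += len;
            picked.push((start, end));
        } else {
            // split at a whole number of window sizes; tail stays stalled
            let times = (budget - used) / window_size;
            let split_at = start + window_size * times;
            picked.push((start, split_at));
            rest.push((split_at, end));
            used = budget;
        }
    }
    *windows = rest;
    picked
}

fn main() {
    let mut windows = vec![(0, 600)];
    // budget is 3 * 60 = 180s: query [0, 180), keep [180, 600) for later
    assert_eq!(take_windows(&mut windows, 60, 3), vec![(0, 180)]);
    assert_eq!(windows, vec![(180, 600)]);
}
```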
@@ -495,6 +535,64 @@ mod test {
                     "((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:00:21' AS TIMESTAMP)))",
                 )
             ),
+            // split range
+            (
+                Vec::from_iter((0..20).map(|i|Timestamp::new_second(i*3)).chain(std::iter::once(
+                    Timestamp::new_second(60 + 3 * (DirtyTimeWindows::MERGE_DIST as i64 + 1)),
+                ))),
+                (chrono::Duration::seconds(3), None),
+                BTreeMap::from([
+                    (
+                        Timestamp::new_second(0),
+                        Some(Timestamp::new_second(
+                            60
+                        )),
+                    ),
+                    (
+                        Timestamp::new_second(60 + 3 * (DirtyTimeWindows::MERGE_DIST as i64 + 1)),
+                        Some(Timestamp::new_second(
+                            60 + 3 * (DirtyTimeWindows::MERGE_DIST as i64 + 1) + 3
+                        )),
+                    )]),
+                Some(
+                    "((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:01:00' AS TIMESTAMP)))",
+                )
+            ),
+            // split 2 min into 1 min
+            (
+                Vec::from_iter((0..40).map(|i|Timestamp::new_second(i*3))),
+                (chrono::Duration::seconds(3), None),
+                BTreeMap::from([
+                    (
+                        Timestamp::new_second(0),
+                        Some(Timestamp::new_second(
+                            40 * 3
+                        )),
+                    )]),
+                Some(
+                    "((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:01:00' AS TIMESTAMP)))",
+                )
+            ),
+            // split 3s + 1min into 3s + 57s
+            (
+                Vec::from_iter(std::iter::once(Timestamp::new_second(0)).chain((0..40).map(|i|Timestamp::new_second(20+i*3)))),
+                (chrono::Duration::seconds(3), None),
+                BTreeMap::from([
+                    (
+                        Timestamp::new_second(0),
+                        Some(Timestamp::new_second(
+                            3
+                        )),
+                    ),(
+                        Timestamp::new_second(20),
+                        Some(Timestamp::new_second(
+                            140
+                        )),
+                    )]),
+                Some(
+                    "(((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:00:03' AS TIMESTAMP))) OR ((ts >= CAST('1970-01-01 00:00:20' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:01:17' AS TIMESTAMP))))",
+                )
+            ),
             // expired
             (
                 vec![
@@ -511,6 +609,8 @@ mod test {
                 None
             ),
         ];
+        // let len = testcases.len();
+        // let testcases = testcases[(len - 2)..(len - 1)].to_vec();
         for (lower_bounds, (window_size, expire_lower_bound), expected, expected_filter_expr) in
             testcases
         {
@@ -61,7 +61,9 @@ use crate::error::{
     SubstraitEncodeLogicalPlanSnafu, UnexpectedSnafu,
 };
 use crate::metrics::{
-    METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME, METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY,
+    METRIC_FLOW_BATCHING_ENGINE_ERROR_CNT, METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME,
+    METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY, METRIC_FLOW_BATCHING_ENGINE_START_QUERY_CNT,
+    METRIC_FLOW_ROWS,
 };
 use crate::{Error, FlowId};

@@ -144,6 +146,12 @@ impl BatchingTask {
         })
     }

+    pub fn adjust(&self, min_run_interval_secs: u64, max_filter_num_per_query: usize) {
+        let mut state = self.state.write().unwrap();
+        state.min_run_interval = Some(min_run_interval_secs);
+        state.max_filter_num = Some(max_filter_num_per_query);
+    }
+
     /// mark time window range (now - expire_after, now) as dirty (or (0, now) if expire_after not set)
     ///
     /// useful for flush_flow to flush dirty time windows range
@@ -280,7 +288,7 @@ impl BatchingTask {
         let catalog = &self.config.sink_table_name[0];
         let schema = &self.config.sink_table_name[1];
         frontend_client
-            .create(expr.clone(), catalog, schema)
+            .create(expr.clone(), catalog, schema, Some(self))
             .await?;
         Ok(())
     }
@@ -361,7 +369,7 @@ impl BatchingTask {
         };

         frontend_client
-            .handle(req, catalog, schema, &mut peer_desc)
+            .handle(req, catalog, schema, &mut peer_desc, Some(self))
             .await
     };

@@ -371,6 +379,9 @@ impl BatchingTask {
                 "Flow {flow_id} executed, affected_rows: {affected_rows:?}, elapsed: {:?}",
                 elapsed
             );
+            METRIC_FLOW_ROWS
+                .with_label_values(&[format!("{}-out-batching", flow_id).as_str()])
+                .inc_by(*affected_rows as _);
         } else if let Err(err) = &res {
             warn!(
                 "Failed to execute Flow {flow_id} on frontend {:?}, result: {err:?}, elapsed: {:?} with query: {}",
@@ -410,6 +421,7 @@ impl BatchingTask {
         engine: QueryEngineRef,
         frontend_client: Arc<FrontendClient>,
     ) {
+        let flow_id_str = self.config.flow_id.to_string();
         loop {
             // first check if shutdown signal is received
             // if so, break the loop
@@ -427,6 +439,9 @@ impl BatchingTask {
                     Err(TryRecvError::Empty) => (),
                 }
             }
+            METRIC_FLOW_BATCHING_ENGINE_START_QUERY_CNT
+                .with_label_values(&[&flow_id_str])
+                .inc();
+
             let new_query = match self.gen_insert_plan(&engine).await {
                 Ok(new_query) => new_query,
@@ -473,6 +488,9 @@ impl BatchingTask {
                 }
                 // TODO(discord9): this error should have better place to go, but for now just print error, also more context is needed
                 Err(err) => {
+                    METRIC_FLOW_BATCHING_ENGINE_ERROR_CNT
+                        .with_label_values(&[&flow_id_str])
+                        .inc();
                     match new_query {
                         Some(query) => {
                             common_telemetry::error!(err; "Failed to execute query for flow={} with query: {query}", self.config.flow_id)
@@ -580,19 +598,20 @@ impl BatchingTask {
             ),
         })?;

-        let expr = self
-            .state
-            .write()
-            .unwrap()
-            .dirty_time_windows
-            .gen_filter_exprs(
+        let expr = {
+            let mut state = self.state.write().unwrap();
+            let max_window_cnt = state
+                .max_filter_num
+                .unwrap_or(DirtyTimeWindows::MAX_FILTER_NUM);
+            state.dirty_time_windows.gen_filter_exprs(
                 &col_name,
                 Some(l),
                 window_size,
-                DirtyTimeWindows::MAX_FILTER_NUM,
+                max_window_cnt,
                 self.config.flow_id,
                 Some(self),
-            )?;
+            )?
+        };

         debug!(
             "Flow id={:?}, Generated filter expr: {:?}",
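The `adjust` method and the `max_window_cnt` override above follow one simple pattern: a runtime-set `Option` overrides a compile-time default. A tiny sketch of just that shape (the default value here is made up, not the crate's `MAX_FILTER_NUM`):

```rust
const MAX_FILTER_NUM: usize = 20; // illustrative default only

// per-query limit: runtime override if adjusted, otherwise the default
fn effective_filter_num(max_filter_num: Option<usize>) -> usize {
    max_filter_num.unwrap_or(MAX_FILTER_NUM)
}

fn main() {
    assert_eq!(effective_filter_num(None), 20);
    assert_eq!(effective_filter_num(Some(5)), 5); // set via adjust_flow
}
```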
@@ -42,6 +42,14 @@ lazy_static! {
             vec![60., 2. * 60., 3. * 60., 5. * 60., 10. * 60.]
         )
         .unwrap();
+    pub static ref METRIC_FLOW_BATCHING_ENGINE_STALLED_QUERY_WINDOW_CNT: HistogramVec =
+        register_histogram_vec!(
+            "greptime_flow_batching_engine_stalled_query_window_cnt",
+            "flow batching engine stalled query time window count",
+            &["flow_id"],
+            vec![0.0, 5., 10., 20., 40.]
+        )
+        .unwrap();
     pub static ref METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_CNT: HistogramVec =
         register_histogram_vec!(
             "greptime_flow_batching_engine_query_window_cnt",
@@ -50,19 +58,57 @@ lazy_static! {
             vec![0.0, 5., 10., 20., 40.]
         )
         .unwrap();
-    pub static ref METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME_RANGE: HistogramVec =
+    pub static ref METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_SIZE: HistogramVec =
         register_histogram_vec!(
-            "greptime_flow_batching_engine_query_time_range_secs",
-            "flow batching engine query time range(seconds)",
+            "greptime_flow_batching_engine_query_window_size_secs",
+            "flow batching engine query window size(seconds)",
             &["flow_id"],
             vec![60., 4. * 60., 16. * 60., 64. * 60., 256. * 60.]
         )
         .unwrap();
+    pub static ref METRIC_FLOW_BATCHING_ENGINE_STALLED_WINDOW_SIZE: HistogramVec =
+        register_histogram_vec!(
+            "greptime_flow_batching_engine_stalled_window_size_secs",
+            "flow batching engine stalled window size(seconds)",
+            &["flow_id"],
+            vec![60., 4. * 60., 16. * 60., 64. * 60., 256. * 60.]
+        )
+        .unwrap();
+    pub static ref METRIC_FLOW_BATCHING_ENGINE_BULK_MARK_TIME_WINDOW_RANGE: HistogramVec =
+        register_histogram_vec!(
+            "greptime_flow_batching_engine_bulk_mark_time_window_range_secs",
+            "flow batching engine query time window range marked by bulk memtable in seconds",
+            &["flow_id"],
+            vec![0.0, 60., 4. * 60., 16. * 60., 64. * 60., 256. * 60.]
+        )
+        .unwrap();
+    pub static ref METRIC_FLOW_BATCHING_ENGINE_START_QUERY_CNT: IntCounterVec =
+        register_int_counter_vec!(
+            "greptime_flow_batching_start_query_count",
+            "flow batching engine started query count",
+            &["flow_id"],
+        )
+        .unwrap();
+    pub static ref METRIC_FLOW_BATCHING_ENGINE_ERROR_CNT: IntCounterVec =
+        register_int_counter_vec!(
+            "greptime_flow_batching_error_count",
+            "flow batching engine error count per flow id",
+            &["flow_id"]
+        )
+        .unwrap();
+    pub static ref METRIC_FLOW_BATCHING_ENGINE_GUESS_FE_LOAD: HistogramVec =
+        register_histogram_vec!(
+            "greptime_flow_batching_engine_guess_fe_load",
+            "flow batching engine guessed frontend load",
+            &["fe_addr"],
+            vec![60., 4. * 60., 16. * 60., 64. * 60., 256. * 60.]
+        )
+        .unwrap();
     pub static ref METRIC_FLOW_RUN_INTERVAL_MS: IntGauge =
         register_int_gauge!("greptime_flow_run_interval_ms", "flow run interval in ms").unwrap();
     pub static ref METRIC_FLOW_ROWS: IntCounterVec = register_int_counter_vec!(
         "greptime_flow_processed_rows",
-        "Count of rows flowing through the system",
+        "Count of rows flowing through the system.",
         &["direction"]
     )
     .unwrap();
@@ -17,6 +17,7 @@
 use std::net::SocketAddr;
 use std::sync::Arc;

+use api::v1::flow::DirtyWindowRequests;
 use api::v1::{RowDeleteRequests, RowInsertRequests};
 use cache::{TABLE_FLOWNODE_SET_CACHE_NAME, TABLE_ROUTE_CACHE_NAME};
 use catalog::CatalogManagerRef;
@@ -136,6 +137,18 @@ impl flow_server::Flow for FlowService {
             .map(Response::new)
             .map_err(to_status_with_last_err)
     }
+
+    async fn handle_mark_dirty_time_window(
+        &self,
+        reqs: Request<DirtyWindowRequests>,
+    ) -> Result<Response<FlowResponse>, Status> {
+        self.dual_engine
+            .batching_engine()
+            .handle_mark_dirty_time_window(reqs.into_inner())
+            .await
+            .map(Response::new)
+            .map_err(to_status_with_last_err)
+    }
 }

 #[derive(Clone)]
@@ -35,8 +35,8 @@ use servers::query_handler::grpc::GrpcQueryHandler;
 use servers::query_handler::sql::SqlQueryHandler;
 use session::context::QueryContextRef;
 use snafu::{ensure, OptionExt, ResultExt};
-use table::metadata::TableId;
 use table::table_name::TableName;
+use table::TableRef;

 use crate::error::{
     CatalogSnafu, DataFusionSnafu, Error, InFlightWriteBytesExceededSnafu,
@@ -235,34 +235,33 @@ impl GrpcQueryHandler for Instance {

     async fn put_record_batch(
         &self,
-        table: &TableName,
-        table_id: &mut Option<TableId>,
+        table_name: &TableName,
+        table_ref: &mut Option<TableRef>,
         decoder: &mut FlightDecoder,
         data: FlightData,
     ) -> Result<AffectedRows> {
-        let table_id = if let Some(table_id) = table_id {
-            *table_id
+        let table = if let Some(table) = table_ref {
+            table.clone()
         } else {
             let table = self
                 .catalog_manager()
                 .table(
-                    &table.catalog_name,
-                    &table.schema_name,
-                    &table.table_name,
+                    &table_name.catalog_name,
+                    &table_name.schema_name,
+                    &table_name.table_name,
                     None,
                 )
                 .await
                 .context(CatalogSnafu)?
                 .with_context(|| TableNotFoundSnafu {
-                    table_name: table.to_string(),
+                    table_name: table_name.to_string(),
                 })?;
-            let id = table.table_info().table_id();
-            *table_id = Some(id);
-            id
+            *table_ref = Some(table.clone());
+            table
         };

         self.inserter
-            .handle_bulk_insert(table_id, decoder, data)
+            .handle_bulk_insert(table, decoder, data)
             .await
             .context(TableOperationSnafu)
     }
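The `put_record_batch` change above widens the memoized value from a bare table id to the whole table handle, so repeated bulk writes to the same table skip the catalog lookup entirely. A self-contained sketch of that get-or-cache shape, with `Table` standing in for the crate's Arc-backed `TableRef`:

```rust
use std::sync::Arc;

struct Table {
    id: u32,
}

// stand-in for the async catalog lookup
fn lookup(name: &str) -> Arc<Table> {
    Arc::new(Table { id: name.len() as u32 })
}

// return the cached handle if present, otherwise look it up and cache it
fn get_or_cache(name: &str, cache: &mut Option<Arc<Table>>) -> Arc<Table> {
    if let Some(table) = cache {
        table.clone()
    } else {
        let table = lookup(name);
        *cache = Some(table.clone());
        table
    }
}

fn main() {
    let mut cache = None;
    let t1 = get_or_cache("metrics", &mut cache);
    let t2 = get_or_cache("metrics", &mut cache);
    assert!(Arc::ptr_eq(&t1, &t2)); // second call hits the cache
    assert_eq!(t1.id, t2.id);
}
```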
@@ -154,6 +154,7 @@ where
             ServerGrpcQueryHandlerAdapter::arc(self.instance.clone()),
             user_provider.clone(),
             runtime,
+            opts.grpc.flight_compression,
         );

         let grpc_server = builder
@@ -12,18 +12,28 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::collections::HashSet;
+
 use ahash::{HashMap, HashMapExt};
+use api::v1::flow::{DirtyWindowRequest, WindowRange};
 use api::v1::region::{
     bulk_insert_request, region_request, BulkInsertRequest, RegionRequest, RegionRequestHeader,
 };
 use api::v1::ArrowIpc;
+use arrow::array::{
+    Array, TimestampMicrosecondArray, TimestampMillisecondArray, TimestampNanosecondArray,
+    TimestampSecondArray,
+};
+use arrow::datatypes::{DataType, Int64Type, TimeUnit};
+use arrow::record_batch::RecordBatch;
 use common_base::AffectedRows;
 use common_grpc::flight::{FlightDecoder, FlightEncoder, FlightMessage};
 use common_grpc::FlightData;
+use common_telemetry::error;
 use common_telemetry::tracing_context::TracingContext;
-use snafu::ResultExt;
-use store_api::storage::RegionId;
-use table::metadata::TableId;
+use snafu::{OptionExt, ResultExt};
+use store_api::storage::{RegionId, TableId};
+use table::TableRef;

 use crate::insert::Inserter;
 use crate::{error, metrics};
@@ -32,10 +42,11 @@ impl Inserter {
     /// Handle bulk insert request.
     pub async fn handle_bulk_insert(
         &self,
-        table_id: TableId,
+        table: TableRef,
         decoder: &mut FlightDecoder,
         data: FlightData,
     ) -> error::Result<AffectedRows> {
+        let table_id = table.table_info().table_id();
         let decode_timer = metrics::HANDLE_BULK_INSERT_ELAPSED
             .with_label_values(&["decode_request"])
             .start_timer();
@@ -48,6 +59,20 @@ impl Inserter {
             return Ok(0);
         };
         decode_timer.observe_duration();
+        if let Some((min, max)) = compute_timestamp_range(
+            &record_batch,
+            &table
+                .table_info()
+                .meta
+                .schema
+                .timestamp_column()
+                .as_ref()
+                .unwrap()
+                .name,
+        )? {
+            // notify flownode to update dirty time windows.
+            self.update_flow_dirty_window(table_id, min, max);
+        }
         metrics::BULK_REQUEST_MESSAGE_SIZE.observe(body_size as f64);
         metrics::BULK_REQUEST_ROWS
             .with_label_values(&["raw"])
@@ -216,4 +241,88 @@ impl Inserter {
         crate::metrics::DIST_INGEST_ROW_COUNT.inc_by(rows_inserted as u64);
         Ok(rows_inserted)
     }
+
+    fn update_flow_dirty_window(&self, table_id: TableId, min: i64, max: i64) {
+        let table_flownode_set_cache = self.table_flownode_set_cache.clone();
+        let node_manager = self.node_manager.clone();
+        common_runtime::spawn_global(async move {
+            let result = table_flownode_set_cache
+                .get(table_id)
+                .await
+                .context(error::RequestInsertsSnafu);
+            let flownodes = match result {
+                Ok(flownodes) => flownodes.unwrap_or_default(),
+                Err(e) => {
+                    error!(e; "Failed to get flownodes for table id: {}", table_id);
+                    return;
+                }
+            };
+
+            let peers: HashSet<_> = flownodes.values().cloned().collect();
+            for peer in peers {
+                let node_manager = node_manager.clone();
+                common_runtime::spawn_global(async move {
+                    if let Err(e) = node_manager
+                        .flownode(&peer)
+                        .await
+                        .handle_mark_window_dirty(DirtyWindowRequest {
+                            table_id,
+                            dirty_time_ranges: vec![WindowRange {
+                                start_value: min,
+                                end_value: max,
+                            }],
+                        })
+                        .await
+                        .context(error::RequestInsertsSnafu)
+                    {
+                        error!(e; "Failed to mark time window as dirty, table: {}, min: {}, max: {}", table_id, min, max);
+                    }
+                });
+            }
+        });
+    }
+}
+
+/// Calculate the timestamp range of record batch. Return `None` if record batch is empty.
+fn compute_timestamp_range(
+    rb: &RecordBatch,
+    timestamp_index_name: &str,
+) -> error::Result<Option<(i64, i64)>> {
+    let ts_col = rb
+        .column_by_name(timestamp_index_name)
+        .context(error::ColumnNotFoundSnafu {
+            msg: timestamp_index_name,
+        })?;
+    if rb.num_rows() == 0 {
+        return Ok(None);
+    }
+    let primitive = match ts_col.data_type() {
+        DataType::Timestamp(unit, _) => match unit {
+            TimeUnit::Second => ts_col
+                .as_any()
+                .downcast_ref::<TimestampSecondArray>()
+                .unwrap()
+                .reinterpret_cast::<Int64Type>(),
+            TimeUnit::Millisecond => ts_col
+                .as_any()
+                .downcast_ref::<TimestampMillisecondArray>()
+                .unwrap()
+                .reinterpret_cast::<Int64Type>(),
+            TimeUnit::Microsecond => ts_col
+                .as_any()
+                .downcast_ref::<TimestampMicrosecondArray>()
+                .unwrap()
+                .reinterpret_cast::<Int64Type>(),
+            TimeUnit::Nanosecond => ts_col
+                .as_any()
+                .downcast_ref::<TimestampNanosecondArray>()
+                .unwrap()
+                .reinterpret_cast::<Int64Type>(),
+        },
+        t => {
+            return error::InvalidTimeIndexTypeSnafu { ty: t.clone() }.fail();
+        }
+    };
+
+    Ok(arrow::compute::min(&primitive).zip(arrow::compute::max(&primitive)))
 }
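`compute_timestamp_range` above works because every Arrow timestamp unit is physically an i64, so the column can be reinterpreted without copying and reduced with the stock min/max kernels. A small usage sketch against a bare `arrow` dependency, assuming the arrow crate's `reinterpret_cast`, which the hunk itself relies on:

```rust
use arrow::array::TimestampMillisecondArray;
use arrow::datatypes::Int64Type;

fn main() {
    let ts = TimestampMillisecondArray::from(vec![Some(1_000), None, Some(3_000)]);
    // view the timestamp array as plain i64 without copying
    let as_i64 = ts.reinterpret_cast::<Int64Type>();
    // min/max skip nulls; zip yields None only if the column is all-null
    let range = arrow::compute::min(&as_i64).zip(arrow::compute::max(&as_i64));
    assert_eq!(range, Some((1_000, 3_000)));
}
```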
@@ -837,6 +837,13 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
+
+    #[snafu(display("Invalid time index type: {}", ty))]
+    InvalidTimeIndexType {
+        ty: arrow::datatypes::DataType,
+        #[snafu(implicit)]
+        location: Location,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -964,6 +971,7 @@ impl ErrorExt for Error {
             Error::ColumnOptions { source, .. } => source.status_code(),
             Error::DecodeFlightData { source, .. } => source.status_code(),
             Error::ComputeArrow { .. } => StatusCode::Internal,
+            Error::InvalidTimeIndexType { .. } => StatusCode::InvalidArguments,
         }
     }
 }
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use api::v1::flow::FlowRequestHeader;
+use api::v1::flow::{AdjustFlow, FlowRequestHeader};
 use async_trait::async_trait;
 use common_error::ext::BoxedError;
 use common_function::handlers::FlowServiceHandler;
@@ -22,6 +22,7 @@ use common_query::error::Result;
 use common_telemetry::tracing_context::TracingContext;
 use futures::stream::FuturesUnordered;
 use futures::StreamExt;
+use serde_json::json;
 use session::context::QueryContextRef;
 use snafu::{OptionExt, ResultExt};

@@ -57,9 +58,96 @@ impl FlowServiceHandler for FlowServiceOperator {
     ) -> Result<api::v1::flow::FlowResponse> {
         self.flush_inner(catalog, flow, ctx).await
     }
+
+    async fn adjust(
+        &self,
+        catalog: &str,
+        flow: &str,
+        min_run_interval_secs: u64,
+        max_filter_num_per_query: usize,
+        ctx: QueryContextRef,
+    ) -> Result<api::v1::flow::FlowResponse> {
+        self.adjust_inner(
+            catalog,
+            flow,
+            min_run_interval_secs,
+            max_filter_num_per_query,
+            ctx,
+        )
+        .await
+    }
 }

 impl FlowServiceOperator {
+    async fn adjust_inner(
+        &self,
+        catalog: &str,
+        flow: &str,
+        min_run_interval_secs: u64,
+        max_filter_num_per_query: usize,
+        ctx: QueryContextRef,
+    ) -> Result<api::v1::flow::FlowResponse> {
+        let id = self
+            .flow_metadata_manager
+            .flow_name_manager()
+            .get(catalog, flow)
+            .await
+            .map_err(BoxedError::new)
+            .context(common_query::error::ExecuteSnafu)?
+            .context(common_meta::error::FlowNotFoundSnafu {
+                flow_name: format!("{}.{}", catalog, flow),
+            })
+            .map_err(BoxedError::new)
+            .context(common_query::error::ExecuteSnafu)?
+            .flow_id();
+
+        let all_flownode_peers = self
+            .flow_metadata_manager
+            .flow_route_manager()
+            .routes(id)
+            .await
+            .map_err(BoxedError::new)
+            .context(common_query::error::ExecuteSnafu)?;
+
+        // order of flownodes doesn't matter here
+        let all_flow_nodes = FuturesUnordered::from_iter(
+            all_flownode_peers
+                .iter()
+                .map(|(_key, peer)| self.node_manager.flownode(peer.peer())),
+        )
+        .collect::<Vec<_>>()
+        .await;
+
+        // TODO(discord9): use proper type for flow options
+        let options = json!({
+            "min_run_interval_secs": min_run_interval_secs,
+            "max_filter_num_per_query": max_filter_num_per_query,
+        });
+
+        for node in all_flow_nodes {
+            let _res = {
+                use api::v1::flow::{flow_request, FlowRequest};
+                let flush_req = FlowRequest {
+                    header: Some(FlowRequestHeader {
+                        tracing_context: TracingContext::from_current_span().to_w3c(),
+                        query_context: Some(
+                            common_meta::rpc::ddl::QueryContext::from(ctx.clone()).into(),
+                        ),
+                    }),
+                    body: Some(flow_request::Body::Adjust(AdjustFlow {
+                        flow_id: Some(api::v1::FlowId { id }),
+                        options: options.to_string(),
+                    })),
+                };
+                node.handle(flush_req)
+                    .await
+                    .map_err(BoxedError::new)
+                    .context(common_query::error::ExecuteSnafu)?
+            };
+        }
+        Ok(Default::default())
+    }
+
     /// Flush the flownodes according to the flow id.
     async fn flush_inner(
         &self,
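The adjust fan-out above ships its two knobs as an untyped JSON string (hence the TODO about a proper type). A minimal round-trip sketch with serde_json, using made-up values:

```rust
use serde_json::json;

fn main() {
    // what the operator side serializes onto the wire
    let options = json!({
        "min_run_interval_secs": 30,
        "max_filter_num_per_query": 10,
    });
    let wire = options.to_string();

    // what the flownode side would parse back out
    let parsed: serde_json::Value = serde_json::from_str(&wire).unwrap();
    assert_eq!(parsed["min_run_interval_secs"], 30);
    assert_eq!(parsed["max_filter_num_per_query"], 10);
}
```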
@@ -78,7 +78,7 @@ pub struct Inserter {
     catalog_manager: CatalogManagerRef,
     pub(crate) partition_manager: PartitionRuleManagerRef,
     pub(crate) node_manager: NodeManagerRef,
-    table_flownode_set_cache: TableFlownodeSetCacheRef,
+    pub(crate) table_flownode_set_cache: TableFlownodeSetCacheRef,
 }

 pub type InserterRef = Arc<Inserter>;
@@ -34,7 +34,6 @@ pub use changes::Changes;
 use datafusion::arrow::array::{ArrayRef, Float64Array, TimestampMillisecondArray};
 use datafusion::error::DataFusionError;
 use datafusion::physical_plan::ColumnarValue;
-use datafusion_expr::{AggregateUDF, ScalarUDF};
 pub use deriv::Deriv;
 pub use extrapolate_rate::{Delta, Increase, Rate};
 pub use holt_winters::HoltWinters;
@@ -45,39 +44,6 @@ pub use quantile_aggr::{quantile_udaf, QUANTILE_NAME};
 pub use resets::Resets;
 pub use round::Round;

-/// Range functions for PromQL.
-pub fn range_funcs() -> Vec<ScalarUDF> {
-    vec![
-        IDelta::<false>::scalar_udf(),
-        IDelta::<true>::scalar_udf(),
-        Rate::scalar_udf(),
-        Increase::scalar_udf(),
-        Delta::scalar_udf(),
-        Resets::scalar_udf(),
-        Changes::scalar_udf(),
-        Deriv::scalar_udf(),
-        Round::scalar_udf(),
-        AvgOverTime::scalar_udf(),
-        MinOverTime::scalar_udf(),
-        MaxOverTime::scalar_udf(),
-        SumOverTime::scalar_udf(),
-        CountOverTime::scalar_udf(),
-        LastOverTime::scalar_udf(),
-        AbsentOverTime::scalar_udf(),
-        PresentOverTime::scalar_udf(),
-        StddevOverTime::scalar_udf(),
-        StdvarOverTime::scalar_udf(),
-        QuantileOverTime::scalar_udf(),
-        PredictLinear::scalar_udf(),
-        HoltWinters::scalar_udf(),
-    ]
-}
-
-/// Aggregate functions for PromQL.
-pub fn aggr_funcs() -> Vec<AggregateUDF> {
-    vec![quantile_udaf()]
-}
-
 /// Extracts an array from a `ColumnarValue`.
 ///
 /// If the `ColumnarValue` is a scalar, it converts it to an array of size 1.
@@ -40,8 +40,8 @@ pub struct QuantileAccumulator {

 /// Create a quantile `AggregateUDF` for PromQL quantile operator,
 /// which calculates φ-quantile (0 ≤ φ ≤ 1) over dimensions
-pub fn quantile_udaf() -> AggregateUDF {
-    create_udaf(
+pub fn quantile_udaf() -> Arc<AggregateUDF> {
+    Arc::new(create_udaf(
         QUANTILE_NAME,
         // Input type: (φ, values)
         vec![DataType::Float64, DataType::Float64],
@@ -63,7 +63,7 @@ pub fn quantile_udaf() -> AggregateUDF {
         )]
         .into(),
         )]),
-    )
+    ))
 }

 impl QuantileAccumulator {
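Returning `Arc<AggregateUDF>` above lets the planner and the substrait decoder share one constructed UDAF instead of re-wrapping it at every call site (the planner hunk below drops its own `Arc::new`). A minimal sketch of the ownership shape, with `Udaf` as a stand-in for DataFusion's `AggregateUDF`:

```rust
use std::sync::Arc;

struct Udaf {
    name: &'static str,
}

// build the UDAF once, hand out cheap shared handles afterwards
fn quantile_udaf() -> Arc<Udaf> {
    Arc::new(Udaf { name: "quantile" })
}

fn main() {
    let shared = quantile_udaf();
    let for_registry = shared.clone(); // pointer copy, no rebuild
    assert_eq!(shared.name, for_registry.name);
    assert_eq!(Arc::strong_count(&shared), 2);
}
```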
@@ -1948,7 +1948,7 @@ impl PromPlanner {
             token::T_QUANTILE => {
                 let q = Self::get_param_value_as_f64(op, param)?;
                 non_col_args.push(lit(q));
-                Arc::new(quantile_udaf())
+                quantile_udaf()
             }
             token::T_AVG => avg_udaf(),
             token::T_COUNT_VALUES | token::T_COUNT => count_udaf(),
|||||||
@@ -28,6 +28,11 @@ use datafusion::execution::{FunctionRegistry, SessionStateBuilder};
 use datafusion::logical_expr::LogicalPlan;
 use datafusion_expr::UserDefinedLogicalNode;
 use greptime_proto::substrait_extension::MergeScan as PbMergeScan;
+use promql::functions::{
+    quantile_udaf, AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, Deriv, IDelta,
+    Increase, LastOverTime, MaxOverTime, MinOverTime, PresentOverTime, Rate, Resets, Round,
+    StddevOverTime, StdvarOverTime, SumOverTime,
+};
 use prost::Message;
 use session::context::QueryContextRef;
 use snafu::ResultExt;
@@ -112,15 +117,12 @@ impl SubstraitPlanDecoder for DefaultPlanDecoder {
         let mut session_state = SessionStateBuilder::new_from_existing(self.session_state.clone())
             .with_catalog_list(catalog_list)
             .build();
-
         // The Substrait decoder looks up UDFs in the SessionState, so we need to register them.
         // Note: the query context must be passed in to set the timezone.
         // We MUST register the UDFs after building the session state; otherwise a UDF is lost
         // if it shares a name with a default UDF or one of its aliases.
         // e.g. the default UDF `to_char()` has the alias `date_format()`; registering a UDF named
         // `date_format()` before building the session state would lose it.
-
-        // Scalar functions
         for func in FUNCTION_REGISTRY.scalar_functions() {
             let udf = func.provide(FunctionContext {
                 query_ctx: self.query_ctx.clone(),
@@ -131,15 +133,6 @@ impl SubstraitPlanDecoder for DefaultPlanDecoder {
                 .context(RegisterUdfSnafu { name: func.name() })?;
         }

-        // PromQL range functions
-        for func in promql::functions::range_funcs() {
-            let name = func.name().to_string();
-            session_state
-                .register_udf(Arc::new(func))
-                .context(RegisterUdfSnafu { name })?;
-        }
-
-        // Aggregate functions
         for func in FUNCTION_REGISTRY.aggregate_functions() {
             let name = func.name().to_string();
             session_state
@@ -147,6 +140,29 @@ impl SubstraitPlanDecoder for DefaultPlanDecoder {
                 .context(RegisterUdfSnafu { name })?;
         }

+        let _ = session_state.register_udaf(quantile_udaf());
+
+        let _ = session_state.register_udf(Arc::new(IDelta::<false>::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(IDelta::<true>::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(Rate::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(Increase::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(Delta::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(Resets::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(Changes::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(Deriv::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(Round::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(AvgOverTime::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(MinOverTime::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(MaxOverTime::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(SumOverTime::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(CountOverTime::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(LastOverTime::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(AbsentOverTime::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(PresentOverTime::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(StddevOverTime::scalar_udf()));
+        let _ = session_state.register_udf(Arc::new(StdvarOverTime::scalar_udf()));
+        // TODO(ruihang): add quantile_over_time, predict_linear, holt_winters, round
+
         let logical_plan = DFLogicalSubstraitConvertor
             .decode(message, session_state)
             .await
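The comment block above is the crux of this hunk: registration must happen after `build()`, because the builder installs default functions (and their aliases) that would otherwise clobber same-named custom UDFs. A toy registry showing the overwrite semantics; `register_udf` returning the displaced entry mirrors DataFusion's `FunctionRegistry`, and the `String` payload stands in for `Arc<ScalarUDF>`:

    use std::collections::HashMap;
    use std::sync::Arc;

    #[derive(Default)]
    struct Registry {
        udfs: HashMap<String, Arc<String>>,
    }

    impl Registry {
        // Returns the previously registered function with the same name, if any.
        fn register_udf(&mut self, name: &str, udf: Arc<String>) -> Option<Arc<String>> {
            self.udfs.insert(name.to_string(), udf)
        }
    }

    fn main() {
        let mut registry = Registry::default();
        // The builder installs the default alias first...
        registry.register_udf("date_format", Arc::new("default to_char".to_string()));
        // ...so a custom UDF registered afterwards wins; in the reverse order
        // the default would silently replace the custom one.
        let displaced = registry.register_udf("date_format", Arc::new("custom".to_string()));
        assert_eq!(displaced.unwrap().as_str(), "default to_char");
    }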
@@ -66,6 +66,8 @@ pub struct GrpcOptions {
     pub max_recv_message_size: ReadableSize,
     /// Max gRPC sending (encoding) message size.
     pub max_send_message_size: ReadableSize,
+    /// Compression mode in Arrow Flight service.
+    pub flight_compression: FlightCompression,
     pub runtime_size: usize,
     #[serde(default = "Default::default")]
     pub tls: TlsOption,
@@ -114,6 +116,7 @@ impl Default for GrpcOptions {
             server_addr: String::new(),
             max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
             max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
+            flight_compression: FlightCompression::ArrowIpc,
             runtime_size: 8,
             tls: TlsOption::default(),
         }
@@ -132,6 +135,30 @@ impl GrpcOptions {
     }
 }

+#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
+#[serde(rename_all = "snake_case")]
+pub enum FlightCompression {
+    /// Disable all compression in Arrow Flight service.
+    #[default]
+    None,
+    /// Enable only transport layer compression (zstd).
+    Transport,
+    /// Enable only payload compression (lz4).
+    ArrowIpc,
+    /// Enable all compression.
+    All,
+}
+
+impl FlightCompression {
+    pub fn transport_compression(&self) -> bool {
+        self == &FlightCompression::Transport || self == &FlightCompression::All
+    }
+
+    pub fn arrow_compression(&self) -> bool {
+        self == &FlightCompression::ArrowIpc || self == &FlightCompression::All
+    }
+}
+
 pub struct GrpcServer {
     // states
     shutdown_tx: Mutex<Option<Sender<()>>>,
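The enum folds two independent switches into four modes. A self-contained replica of just the predicate logic (the real type lives in `servers::grpc`, as above), showing how each mode resolves:

    #[derive(Clone, Copy, PartialEq, Eq)]
    enum FlightCompression {
        None,
        Transport,
        ArrowIpc,
        All,
    }

    impl FlightCompression {
        fn transport_compression(&self) -> bool {
            matches!(self, FlightCompression::Transport | FlightCompression::All)
        }
        fn arrow_compression(&self) -> bool {
            matches!(self, FlightCompression::ArrowIpc | FlightCompression::All)
        }
    }

    fn main() {
        // `arrow_ipc`, the frontend default in this change, compresses record
        // batch payloads (lz4) but leaves gRPC transport compression (zstd) off.
        let mode = FlightCompression::ArrowIpc;
        assert!(mode.arrow_compression() && !mode.transport_compression());
        // `None`, the enum's `#[default]`, disables both.
        let off = FlightCompression::None;
        assert!(!off.arrow_compression() && !off.transport_compression());
    }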
@@ -45,7 +45,7 @@ use tonic::{Request, Response, Status, Streaming};
 use crate::error::{InvalidParameterSnafu, ParseJsonSnafu, Result, ToJsonSnafu};
 pub use crate::grpc::flight::stream::FlightRecordBatchStream;
 use crate::grpc::greptime_handler::{get_request_type, GreptimeRequestHandler};
-use crate::grpc::TonicResult;
+use crate::grpc::{FlightCompression, TonicResult};
 use crate::http::header::constants::GREPTIME_DB_HEADER_NAME;
 use crate::http::AUTHORIZATION_HEADER;
 use crate::{error, hint_headers};
@@ -195,9 +195,14 @@ impl FlightCraft for GreptimeRequestHandler {
             protocol = "grpc",
             request_type = get_request_type(&request)
         );
+        let flight_compression = self.flight_compression;
         async {
             let output = self.handle_request(request, hints).await?;
-            let stream = to_flight_data_stream(output, TracingContext::from_current_span());
+            let stream = to_flight_data_stream(
+                output,
+                TracingContext::from_current_span(),
+                flight_compression,
+            );
             Ok(Response::new(stream))
         }
         .trace(span)
@@ -365,14 +370,16 @@ impl Stream for PutRecordBatchRequestStream {
 fn to_flight_data_stream(
     output: Output,
     tracing_context: TracingContext,
+    flight_compression: FlightCompression,
 ) -> TonicStream<FlightData> {
     match output.data {
         OutputData::Stream(stream) => {
-            let stream = FlightRecordBatchStream::new(stream, tracing_context);
+            let stream = FlightRecordBatchStream::new(stream, tracing_context, flight_compression);
             Box::pin(stream) as _
         }
         OutputData::RecordBatches(x) => {
-            let stream = FlightRecordBatchStream::new(x.as_stream(), tracing_context);
+            let stream =
+                FlightRecordBatchStream::new(x.as_stream(), tracing_context, flight_compression);
             Box::pin(stream) as _
         }
         OutputData::AffectedRows(rows) => {
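One small detail in the `FlightCraft` hunk: `flight_compression` is `Copy`, and the handler copies it out of `self` before the instrumented async block, presumably so the block can own a plain value for that field rather than reaching through `&self`. A minimal illustration of the pattern; the types are illustrative and `futures::executor::block_on` is assumed for the demo:

    use futures::executor::block_on;

    #[derive(Clone, Copy)]
    struct Mode(u8);

    struct Handler {
        mode: Mode,
    }

    impl Handler {
        async fn respond(&self) -> u8 {
            // Copy the small value out; the inner future owns `mode` outright,
            // so no borrow of `self.mode` lives across the await below.
            let mode = self.mode;
            async move { mode.0 }.await
        }
    }

    fn main() {
        let handler = Handler { mode: Mode(3) };
        assert_eq!(block_on(handler.respond()), 3);
    }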
@@ -30,6 +30,7 @@ use tokio::task::JoinHandle;

 use crate::error;
 use crate::grpc::flight::TonicResult;
+use crate::grpc::FlightCompression;

 #[pin_project(PinnedDrop)]
 pub struct FlightRecordBatchStream {
@@ -41,18 +42,27 @@ pub struct FlightRecordBatchStream {
 }

 impl FlightRecordBatchStream {
-    pub fn new(recordbatches: SendableRecordBatchStream, tracing_context: TracingContext) -> Self {
+    pub fn new(
+        recordbatches: SendableRecordBatchStream,
+        tracing_context: TracingContext,
+        compression: FlightCompression,
+    ) -> Self {
         let (tx, rx) = mpsc::channel::<TonicResult<FlightMessage>>(1);
         let join_handle = common_runtime::spawn_global(async move {
             Self::flight_data_stream(recordbatches, tx)
                 .trace(tracing_context.attach(info_span!("flight_data_stream")))
                 .await
         });
+        let encoder = if compression.arrow_compression() {
+            FlightEncoder::default()
+        } else {
+            FlightEncoder::with_compression_disabled()
+        };
         Self {
             rx,
             join_handle,
             done: false,
-            encoder: FlightEncoder::with_compression_disabled(),
+            encoder,
         }
     }

@@ -161,7 +171,11 @@ mod test {
         let recordbatches = RecordBatches::try_new(schema.clone(), vec![recordbatch.clone()])
             .unwrap()
             .as_stream();
-        let mut stream = FlightRecordBatchStream::new(recordbatches, TracingContext::default());
+        let mut stream = FlightRecordBatchStream::new(
+            recordbatches,
+            TracingContext::default(),
+            FlightCompression::default(),
+        );

         let mut raw_data = Vec::with_capacity(2);
         raw_data.push(stream.next().await.unwrap().unwrap());
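Context for the constructor above: the record batches are produced by a spawned task and handed over a bounded channel of capacity 1, so the producer runs at most one message ahead of whatever the gRPC side consumes. A stand-alone sketch of that producer/consumer shape, assuming the tokio and tokio-stream crates; the message type is a placeholder:

    use tokio::sync::mpsc;
    use tokio_stream::wrappers::ReceiverStream;
    use tokio_stream::StreamExt;

    #[tokio::main]
    async fn main() {
        // Capacity 1: the producer can be at most one message ahead.
        let (tx, rx) = mpsc::channel::<Result<String, String>>(1);
        tokio::spawn(async move {
            for i in 0..3 {
                // `send` awaits until the consumer frees the slot: backpressure.
                if tx.send(Ok(format!("message {i}"))).await.is_err() {
                    break; // receiver dropped; stop producing
                }
            }
        });
        let mut stream = ReceiverStream::new(rx);
        while let Some(item) = stream.next().await {
            println!("{}", item.unwrap());
        }
    }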
@@ -40,7 +40,7 @@ use futures_util::StreamExt;
 use session::context::{QueryContext, QueryContextBuilder, QueryContextRef};
 use session::hints::READ_PREFERENCE_HINT;
 use snafu::{OptionExt, ResultExt};
-use table::metadata::TableId;
+use table::TableRef;
 use tokio::sync::mpsc;

 use crate::error::Error::UnsupportedAuthScheme;
@@ -49,7 +49,7 @@ use crate::error::{
     JoinTaskSnafu, NotFoundAuthHeaderSnafu, Result, UnknownHintSnafu,
 };
 use crate::grpc::flight::{PutRecordBatchRequest, PutRecordBatchRequestStream};
-use crate::grpc::TonicResult;
+use crate::grpc::{FlightCompression, TonicResult};
 use crate::metrics;
 use crate::metrics::{METRIC_AUTH_FAILURE, METRIC_SERVER_GRPC_DB_REQUEST_TIMER};
 use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
@@ -59,6 +59,7 @@ pub struct GreptimeRequestHandler {
     handler: ServerGrpcQueryHandlerRef,
     user_provider: Option<UserProviderRef>,
     runtime: Option<Runtime>,
+    pub(crate) flight_compression: FlightCompression,
 }

 impl GreptimeRequestHandler {
@@ -66,11 +67,13 @@ impl GreptimeRequestHandler {
         handler: ServerGrpcQueryHandlerRef,
         user_provider: Option<UserProviderRef>,
         runtime: Option<Runtime>,
+        flight_compression: FlightCompression,
     ) -> Self {
         Self {
             handler,
             user_provider,
             runtime,
+            flight_compression,
         }
     }

@@ -146,8 +149,8 @@ impl GreptimeRequestHandler {
             .clone()
             .unwrap_or_else(common_runtime::global_runtime);
         runtime.spawn(async move {
-            // Cached table id
-            let mut table_id: Option<TableId> = None;
+            // Cached table ref
+            let mut table_ref: Option<TableRef> = None;

             let mut decoder = FlightDecoder::default();
             while let Some(request) = stream.next().await {
@@ -166,7 +169,7 @@ impl GreptimeRequestHandler {

                 let timer = metrics::GRPC_BULK_INSERT_ELAPSED.start_timer();
                 let result = handler
-                    .put_record_batch(&table_name, &mut table_id, &mut decoder, data)
+                    .put_record_batch(&table_name, &mut table_ref, &mut decoder, data)
                     .await
                     .inspect_err(|e| error!(e; "Failed to handle flight record batches"));
                 timer.observe_duration();
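The `Option<TableId>` to `Option<TableRef>` switch keeps a resolved table handle, not just its id, cached across a single bulk-insert stream. A toy version of the resolve-once pattern; the names here are illustrative stand-ins, not GreptimeDB APIs:

    use std::sync::Arc;

    struct Table {
        name: String,
    }
    type TableRef = Arc<Table>;

    // Stands in for a catalog lookup; the per-stream cache exists so this
    // cost is paid once, not per record batch.
    fn resolve(name: &str) -> TableRef {
        Arc::new(Table { name: name.to_string() })
    }

    fn handle_batch(table_name: &str, cached: &mut Option<TableRef>) -> TableRef {
        cached.get_or_insert_with(|| resolve(table_name)).clone()
    }

    fn main() {
        let mut cached = None;
        let first = handle_batch("metrics", &mut cached);
        let second = handle_batch("metrics", &mut cached);
        assert!(Arc::ptr_eq(&first, &second)); // resolved exactly once
        assert_eq!(first.name, "metrics");
    }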
@@ -23,8 +23,8 @@ use common_grpc::flight::FlightDecoder;
 use common_query::Output;
 use session::context::QueryContextRef;
 use snafu::ResultExt;
-use table::metadata::TableId;
 use table::table_name::TableName;
+use table::TableRef;

 use crate::error::{self, Result};

@@ -45,8 +45,8 @@ pub trait GrpcQueryHandler {

     async fn put_record_batch(
         &self,
-        table: &TableName,
-        table_id: &mut Option<TableId>,
+        table_name: &TableName,
+        table_ref: &mut Option<TableRef>,
         decoder: &mut FlightDecoder,
         flight_data: FlightData,
     ) -> std::result::Result<AffectedRows, Self::Error>;
@@ -77,13 +77,13 @@ where

     async fn put_record_batch(
         &self,
-        table: &TableName,
-        table_id: &mut Option<TableId>,
+        table_name: &TableName,
+        table_ref: &mut Option<TableRef>,
         decoder: &mut FlightDecoder,
         data: FlightData,
     ) -> Result<AffectedRows> {
         self.0
-            .put_record_batch(table, table_id, decoder, data)
+            .put_record_batch(table_name, table_ref, decoder, data)
             .await
             .map_err(BoxedError::new)
             .context(error::ExecuteGrpcRequestSnafu)
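The generic impl above is the adapter layer: it forwards the call unchanged and only boxes the concrete error (`.map_err(BoxedError::new)`) so the server layer sees one uniform error type. The same shape in miniature, with stand-in trait and error types:

    use std::error::Error;
    use std::fmt;

    trait Handler {
        type Err: Error + Send + Sync + 'static;
        fn put(&self, rows: usize) -> Result<usize, Self::Err>;
    }

    // Newtype adapter that erases the handler's concrete error type.
    struct Adapter<H>(H);

    impl<H: Handler> Adapter<H> {
        fn put(&self, rows: usize) -> Result<usize, Box<dyn Error + Send + Sync>> {
            self.0
                .put(rows)
                .map_err(|e| Box::new(e) as Box<dyn Error + Send + Sync>)
        }
    }

    #[derive(Debug)]
    struct DummyErr;
    impl fmt::Display for DummyErr {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "dummy error")
        }
    }
    impl Error for DummyErr {}

    struct Dummy;
    impl Handler for Dummy {
        type Err = DummyErr;
        fn put(&self, rows: usize) -> Result<usize, DummyErr> {
            Ok(rows)
        }
    }

    fn main() {
        assert_eq!(Adapter(Dummy).put(7).unwrap(), 7);
    }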
@@ -34,7 +34,6 @@ use servers::query_handler::sql::{ServerSqlQueryHandlerRef, SqlQueryHandler};
 use session::context::QueryContextRef;
 use snafu::ensure;
 use sql::statements::statement::Statement;
-use table::metadata::TableId;
 use table::table_name::TableName;
 use table::TableRef;

@@ -160,15 +159,11 @@ impl GrpcQueryHandler for DummyInstance {

     async fn put_record_batch(
         &self,
-        table: &TableName,
-        table_id: &mut Option<TableId>,
-        decoder: &mut FlightDecoder,
-        data: FlightData,
+        _table_name: &TableName,
+        _table_ref: &mut Option<TableRef>,
+        _decoder: &mut FlightDecoder,
+        _data: FlightData,
     ) -> std::result::Result<AffectedRows, Self::Error> {
-        let _ = table;
-        let _ = data;
-        let _ = table_id;
-        let _ = decoder;
         unimplemented!()
     }
 }
@@ -34,7 +34,7 @@ mod test {
     use itertools::Itertools;
     use servers::grpc::builder::GrpcServerBuilder;
     use servers::grpc::greptime_handler::GreptimeRequestHandler;
-    use servers::grpc::GrpcServerConfig;
+    use servers::grpc::{FlightCompression, GrpcServerConfig};
     use servers::query_handler::grpc::ServerGrpcQueryHandlerAdapter;
     use servers::server::Server;

@@ -94,6 +94,7 @@ mod test {
             )
             .ok(),
             Some(runtime.clone()),
+            FlightCompression::default(),
         );
         let mut grpc_server = GrpcServerBuilder::new(GrpcServerConfig::default(), runtime)
             .flight_handler(Arc::new(greptime_request_handler))
@@ -139,8 +140,7 @@ mod test {
         let schema = record_batches[0].schema.arrow_schema().clone();

         let stream = futures::stream::once(async move {
-            let mut schema_data =
-                FlightEncoder::with_compression_disabled().encode(FlightMessage::Schema(schema));
+            let mut schema_data = FlightEncoder::default().encode(FlightMessage::Schema(schema));
             let metadata = DoPutMetadata::new(0);
             schema_data.app_metadata = serde_json::to_vec(&metadata).unwrap().into();
             // first message in "DoPut" stream should carry table name in flight descriptor
@@ -155,7 +155,7 @@ mod test {
             tokio_stream::iter(record_batches)
                 .enumerate()
                 .map(|(i, x)| {
-                    let mut encoder = FlightEncoder::with_compression_disabled();
+                    let mut encoder = FlightEncoder::default();
                     let message = FlightMessage::RecordBatch(x.into_df_record_batch());
                     let mut data = encoder.encode(message);
                     let metadata = DoPutMetadata::new((i + 1) as i64);
@@ -42,7 +42,7 @@ use object_store::test_util::TempFolder;
 use object_store::ObjectStore;
 use servers::grpc::builder::GrpcServerBuilder;
 use servers::grpc::greptime_handler::GreptimeRequestHandler;
-use servers::grpc::{GrpcOptions, GrpcServer, GrpcServerConfig};
+use servers::grpc::{FlightCompression, GrpcOptions, GrpcServer, GrpcServerConfig};
 use servers::http::{HttpOptions, HttpServerBuilder, PromValidationMode};
 use servers::metrics_handler::MetricsHandler;
 use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef};
@@ -585,6 +585,7 @@ pub async fn setup_grpc_server_with(
         ServerGrpcQueryHandlerAdapter::arc(fe_instance_ref.clone()),
         user_provider.clone(),
         Some(runtime.clone()),
+        FlightCompression::default(),
     );

     let flight_handler = Arc::new(greptime_request_handler.clone());
@@ -1025,6 +1025,7 @@ bind_addr = "127.0.0.1:4001"
 server_addr = "127.0.0.1:4001"
 max_recv_message_size = "512MiB"
 max_send_message_size = "512MiB"
+flight_compression = "arrow_ipc"
 runtime_size = 8

 [grpc.tls]
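The config string above ties back to the enum: serde's `rename_all = "snake_case"` maps `"arrow_ipc"` to `FlightCompression::ArrowIpc`. A hedged sketch of that round trip, assuming the serde and toml crates and re-declaring the enum locally:

    use serde::Deserialize;

    #[derive(Debug, Deserialize, PartialEq)]
    #[serde(rename_all = "snake_case")]
    enum FlightCompression {
        None,
        Transport,
        ArrowIpc,
        All,
    }

    #[derive(Debug, Deserialize)]
    struct Grpc {
        flight_compression: FlightCompression,
    }

    fn main() {
        // Same key/value as the [grpc] section in the test config above.
        let grpc: Grpc = toml::from_str(r#"flight_compression = "arrow_ipc""#).unwrap();
        assert_eq!(grpc.flight_compression, FlightCompression::ArrowIpc);
    }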