mirror of https://github.com/GreptimeTeam/greptimedb.git
synced 2025-12-22 22:20:02 +00:00

Compare commits
26 Commits
v0.12.0-ni ... wyze_with_

| SHA1 |
|---|
| 7f7b974e8a |
| 4875ace0d0 |
| a847d96649 |
| 23a0a54e18 |
| 78eb8b53f6 |
| 2455f39e8e |
| 7fe0074202 |
| e16bc203d0 |
| 9a3c26bb0a |
| e1ff398c32 |
| 780e3000de |
| 2b5ddf8427 |
| 904d560175 |
| 765d1277ee |
| ccf42a9d97 |
| 71e2fb895f |
| c9671fd669 |
| b5efc75aab |
| c1d18d9980 |
| 5d9faaaf39 |
| 538875abee |
| 5ed09c4584 |
| 3f6a41eac5 |
| ff0dcf12c5 |
| 5b1fca825a |
| 7bd108e2be |
65 Cargo.lock generated
@@ -432,7 +432,7 @@ dependencies = [
 "arrow-schema",
 "chrono",
 "half",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "lexical-core",
 "num",
 "serde",
@@ -1475,7 +1475,7 @@ version = "0.13.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6026d8cd82ada8bbcfe337805dd1eb6afdc9e80fa4d57e977b3a36315e0c5525"
 dependencies = [
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "lazy_static",
 "num-traits",
 "regex",
@@ -2009,6 +2009,7 @@ dependencies = [
 name = "common-function"
 version = "0.12.0"
 dependencies = [
 "ahash 0.8.11",
 "api",
+"approx 0.5.1",
 "arc-swap",
@@ -2031,6 +2032,7 @@ dependencies = [
 "geo-types",
 "geohash",
 "h3o",
+"hyperloglogplus",
 "jsonb",
 "nalgebra 0.33.2",
 "num",
@@ -2974,7 +2976,7 @@ dependencies = [
 "chrono",
 "half",
 "hashbrown 0.14.5",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "libc",
 "object_store",
 "parquet",
@@ -3034,7 +3036,7 @@ dependencies = [
 "datafusion-functions-aggregate-common",
 "datafusion-functions-window-common",
 "datafusion-physical-expr-common",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "paste",
 "recursive",
 "serde_json",
@@ -3156,7 +3158,7 @@ dependencies = [
 "datafusion-physical-expr-common",
 "datafusion-physical-plan",
 "half",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "log",
 "parking_lot 0.12.3",
 "paste",
@@ -3207,7 +3209,7 @@ dependencies = [
 "datafusion-common",
 "datafusion-expr",
 "datafusion-physical-expr",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "itertools 0.13.0",
 "log",
 "recursive",
@@ -3232,7 +3234,7 @@ dependencies = [
 "datafusion-physical-expr-common",
 "half",
 "hashbrown 0.14.5",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "itertools 0.13.0",
 "log",
 "paste",
@@ -3291,7 +3293,7 @@ dependencies = [
 "futures",
 "half",
 "hashbrown 0.14.5",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "itertools 0.13.0",
 "log",
 "once_cell",
@@ -3311,7 +3313,7 @@ dependencies = [
 "arrow-schema",
 "datafusion-common",
 "datafusion-expr",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "log",
 "recursive",
 "regex",
@@ -4163,6 +4165,7 @@ dependencies = [
 "bytes",
 "cache",
 "catalog",
+"chrono",
 "client",
 "common-base",
 "common-catalog",
@@ -4697,7 +4700,7 @@ dependencies = [
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=a25adc8a01340231121646d8f0a29d0e92f45461#a25adc8a01340231121646d8f0a29d0e92f45461"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=072ce580502e015df1a6b03a185b60309a7c2a7a#072ce580502e015df1a6b03a185b60309a7c2a7a"
 dependencies = [
 "prost 0.13.3",
 "serde",
@@ -4720,7 +4723,7 @@ dependencies = [
 "futures-sink",
 "futures-util",
 "http 0.2.12",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "slab",
 "tokio",
 "tokio-util",
@@ -4739,7 +4742,7 @@ dependencies = [
 "futures-core",
 "futures-sink",
 "http 1.1.0",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "slab",
 "tokio",
 "tokio-util",
@@ -5289,6 +5292,15 @@ dependencies = [
 "tracing",
 ]

+[[package]]
+name = "hyperloglogplus"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "621debdf94dcac33e50475fdd76d34d5ea9c0362a834b9db08c3024696c1fbe3"
+dependencies = [
+"serde",
+]
+
 [[package]]
 name = "i_float"
 version = "1.3.1"
@@ -5577,9 +5589,9 @@ dependencies = [

 [[package]]
 name = "indexmap"
-version = "2.6.0"
+version = "2.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
+checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652"
 dependencies = [
 "equivalent",
 "hashbrown 0.15.2",
@@ -5593,7 +5605,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88"
 dependencies = [
 "ahash 0.8.11",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "is-terminal",
 "itoa",
 "log",
@@ -5940,7 +5952,7 @@ version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4ee7893dab2e44ae5f9d0173f26ff4aa327c10b01b06a72b52dd9405b628640d"
 dependencies = [
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 ]

 [[package]]
@@ -6423,7 +6435,7 @@ dependencies = [
 "cactus",
 "cfgrammar",
 "filetime",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "lazy_static",
 "lrtable",
 "num-traits",
@@ -7664,7 +7676,7 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a"
 dependencies = [
 "futures-core",
 "futures-sink",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "js-sys",
 "once_cell",
 "pin-project-lite",
@@ -8236,7 +8248,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
 dependencies = [
 "fixedbitset",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 ]

 [[package]]
@@ -10327,7 +10339,7 @@ version = "1.0.137"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "930cfb6e6abf99298aaad7d29abbef7a9999a9a8806a40088f55f0dcec03146b"
 dependencies = [
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "itoa",
 "memchr",
 "ryu",
@@ -10398,7 +10410,7 @@ dependencies = [
 "chrono",
 "hex",
 "indexmap 1.9.3",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "serde",
 "serde_derive",
 "serde_json",
@@ -10424,7 +10436,7 @@ version = "0.9.34+deprecated"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
 dependencies = [
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "itoa",
 "ryu",
 "serde",
@@ -10485,6 +10497,7 @@ dependencies = [
 "humantime",
 "humantime-serde",
 "hyper 1.4.1",
+"indexmap 2.7.1",
 "influxdb_line_protocol",
 "itertools 0.10.5",
 "json5",
@@ -11026,7 +11039,7 @@ dependencies = [
 "futures-util",
 "hashbrown 0.15.2",
 "hashlink",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "log",
 "memchr",
 "once_cell",
@@ -12322,7 +12335,7 @@ version = "0.19.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
 dependencies = [
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "toml_datetime",
 "winnow 0.5.40",
 ]
@@ -12333,7 +12346,7 @@ version = "0.22.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5"
 dependencies = [
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "serde",
 "serde_spanned",
 "toml_datetime",
@@ -12471,7 +12484,7 @@ dependencies = [
 "futures-core",
 "futures-util",
 "hdrhistogram",
-"indexmap 2.6.0",
+"indexmap 2.7.1",
 "pin-project-lite",
 "slab",
 "sync_wrapper 1.0.1",
@@ -129,7 +129,7 @@ etcd-client = "0.14"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a25adc8a01340231121646d8f0a29d0e92f45461" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "072ce580502e015df1a6b03a185b60309a7c2a7a" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
@@ -319,6 +319,7 @@
 | `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
 | `use_memory_store` | Bool | `false` | Store data in memory. |
 | `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
+| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
 | `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
 | `runtime` | -- | -- | The runtime options. |
 | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
@@ -50,6 +50,9 @@ use_memory_store = false
 ## - Using shared storage (e.g., s3).
 enable_region_failover = false

+## Max allowed idle time before removing node info from metasrv memory.
+node_max_idle_time = "24hours"
+
 ## Whether to enable greptimedb telemetry. Enabled by default.
 #+ enable_telemetry = true

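The new `node_max_idle_time` option above takes a humantime-style duration string. A minimal sketch of parsing such a value, assuming the `humantime` crate already pinned in this workspace; the helper below is illustrative, not metasrv's actual config loader:

```rust
// Parse a humantime-style duration such as the `24hours` default shown above.
fn parse_max_idle_time(raw: &str) -> Result<std::time::Duration, humantime::DurationError> {
    humantime::parse_duration(raw)
}

fn main() {
    let max_idle_time = parse_max_idle_time("24hours").expect("valid duration");
    assert_eq!(max_idle_time.as_secs(), 24 * 60 * 60);
    println!("node_max_idle_time = {max_idle_time:?}");
}
```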
@@ -16,7 +16,6 @@

 mod client;
 pub mod client_manager;
-#[cfg(feature = "testing")]
 mod database;
 pub mod error;
 pub mod flow;
@@ -34,7 +33,6 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
 use snafu::OptionExt;

 pub use self::client::Client;
-#[cfg(feature = "testing")]
 pub use self::database::Database;
 pub use self::error::{Error, Result};
 use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};
@@ -32,7 +32,7 @@ use common_meta::key::TableMetadataManager;
 use common_telemetry::info;
 use common_telemetry::logging::TracingOptions;
 use common_version::{short_version, version};
-use flow::{FlownodeBuilder, FlownodeInstance, FrontendInvoker};
+use flow::{FlownodeBuilder, FlownodeInstance, FrontendClient, FrontendInvoker};
 use meta_client::{MetaClientOptions, MetaClientType};
 use servers::Mode;
 use snafu::{OptionExt, ResultExt};
@@ -317,6 +317,8 @@ impl StartCommand {
             Arc::new(executor),
         );

+        let frontend_client = FrontendClient::from_meta_client(meta_client.clone());
+
         let flow_metadata_manager = Arc::new(FlowMetadataManager::new(cached_meta_backend.clone()));
         let flownode_builder = FlownodeBuilder::new(
             opts,
@@ -324,6 +326,7 @@ impl StartCommand {
             table_metadata_manager,
             catalog_manager.clone(),
             flow_metadata_manager,
+            Arc::new(frontend_client),
         )
         .with_heartbeat_task(heartbeat_task);

@@ -54,7 +54,10 @@ use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, Sto
 use datanode::datanode::{Datanode, DatanodeBuilder};
 use datanode::region_server::RegionServer;
 use file_engine::config::EngineConfig as FileEngineConfig;
-use flow::{FlowConfig, FlowWorkerManager, FlownodeBuilder, FlownodeOptions, FrontendInvoker};
+use flow::{
+    FlowConfig, FlowWorkerManager, FlownodeBuilder, FlownodeOptions, FrontendClient,
+    FrontendInvoker,
+};
 use frontend::frontend::FrontendOptions;
 use frontend::instance::builder::FrontendBuilder;
 use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
@@ -533,12 +536,16 @@ impl StartCommand {
             flow: opts.flow.clone(),
             ..Default::default()
         };

+        let fe_server_addr = fe_opts.grpc.bind_addr.clone();
+        let frontend_client = FrontendClient::from_static_grpc_addr(fe_server_addr);
         let flow_builder = FlownodeBuilder::new(
             flownode_options,
             plugins.clone(),
             table_metadata_manager.clone(),
             catalog_manager.clone(),
             flow_metadata_manager.clone(),
+            Arc::new(frontend_client),
         );
         let flownode = Arc::new(
             flow_builder
@@ -12,6 +12,7 @@ default = ["geo"]
 geo = ["geohash", "h3o", "s2", "wkt", "geo-types", "dep:geo"]

 [dependencies]
+ahash = "0.8"
 api.workspace = true
 arc-swap = "1.0"
 async-trait.workspace = true
@@ -33,6 +34,7 @@ geo = { version = "0.29", optional = true }
 geo-types = { version = "0.7", optional = true }
 geohash = { version = "0.13", optional = true }
 h3o = { version = "0.6", optional = true }
+hyperloglogplus = "0.4"
 jsonb.workspace = true
 nalgebra.workspace = true
 num = "0.4"
@@ -12,6 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+mod hll;
 mod uddsketch_state;

+pub(crate) use hll::HllStateType;
+pub use hll::{HllState, HLL_MERGE_NAME, HLL_NAME};
 pub use uddsketch_state::{UddSketchState, UDDSKETCH_STATE_NAME};
319 src/common/function/src/aggr/hll.rs Normal file
@@ -0,0 +1,319 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_query::prelude::*;
use common_telemetry::trace;
use datafusion::arrow::array::ArrayRef;
use datafusion::common::cast::{as_binary_array, as_string_array};
use datafusion::common::not_impl_err;
use datafusion::error::{DataFusionError, Result as DfResult};
use datafusion::logical_expr::function::AccumulatorArgs;
use datafusion::logical_expr::{Accumulator as DfAccumulator, AggregateUDF};
use datafusion::prelude::create_udaf;
use datatypes::arrow::datatypes::DataType;
use hyperloglogplus::{HyperLogLog, HyperLogLogPlus};

use crate::utils::FixedRandomState;

pub const HLL_NAME: &str = "hll";
pub const HLL_MERGE_NAME: &str = "hll_merge";

const DEFAULT_PRECISION: u8 = 14;

pub(crate) type HllStateType = HyperLogLogPlus<String, FixedRandomState>;

pub struct HllState {
    hll: HllStateType,
}

impl std::fmt::Debug for HllState {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "HllState<Opaque>")
    }
}

impl Default for HllState {
    fn default() -> Self {
        Self::new()
    }
}

impl HllState {
    pub fn new() -> Self {
        Self {
            // Safety: the DEFAULT_PRECISION is fixed and valid
            hll: HllStateType::new(DEFAULT_PRECISION, FixedRandomState::new()).unwrap(),
        }
    }

    /// Create a UDF for the `hll` function.
    ///
    /// `hll` accepts a string column and aggregates the
    /// values into a HyperLogLog state.
    pub fn state_udf_impl() -> AggregateUDF {
        create_udaf(
            HLL_NAME,
            vec![DataType::Utf8],
            Arc::new(DataType::Binary),
            Volatility::Immutable,
            Arc::new(Self::create_accumulator),
            Arc::new(vec![DataType::Binary]),
        )
    }

    /// Create a UDF for the `hll_merge` function.
    ///
    /// `hll_merge` accepts a binary column of states generated by `hll`
    /// and merges them into a single state.
    pub fn merge_udf_impl() -> AggregateUDF {
        create_udaf(
            HLL_MERGE_NAME,
            vec![DataType::Binary],
            Arc::new(DataType::Binary),
            Volatility::Immutable,
            Arc::new(Self::create_merge_accumulator),
            Arc::new(vec![DataType::Binary]),
        )
    }

    fn update(&mut self, value: &str) {
        self.hll.insert(value);
    }

    fn merge(&mut self, raw: &[u8]) {
        if let Ok(serialized) = bincode::deserialize::<HllStateType>(raw) {
            if let Ok(()) = self.hll.merge(&serialized) {
                return;
            }
        }
        trace!("Warning: Failed to merge HyperLogLog from {:?}", raw);
    }

    fn create_accumulator(acc_args: AccumulatorArgs) -> DfResult<Box<dyn DfAccumulator>> {
        let data_type = acc_args.exprs[0].data_type(acc_args.schema)?;

        match data_type {
            DataType::Utf8 => Ok(Box::new(HllState::new())),
            other => not_impl_err!("{HLL_NAME} does not support data type: {other}"),
        }
    }

    fn create_merge_accumulator(acc_args: AccumulatorArgs) -> DfResult<Box<dyn DfAccumulator>> {
        let data_type = acc_args.exprs[0].data_type(acc_args.schema)?;

        match data_type {
            DataType::Binary => Ok(Box::new(HllState::new())),
            other => not_impl_err!("{HLL_MERGE_NAME} does not support data type: {other}"),
        }
    }
}

impl DfAccumulator for HllState {
    fn update_batch(&mut self, values: &[ArrayRef]) -> DfResult<()> {
        let array = &values[0];

        match array.data_type() {
            DataType::Utf8 => {
                let string_array = as_string_array(array)?;
                for value in string_array.iter().flatten() {
                    self.update(value);
                }
            }
            DataType::Binary => {
                let binary_array = as_binary_array(array)?;
                for v in binary_array.iter().flatten() {
                    self.merge(v);
                }
            }
            _ => {
                return not_impl_err!(
                    "HLL functions do not support data type: {}",
                    array.data_type()
                )
            }
        }

        Ok(())
    }

    fn evaluate(&mut self) -> DfResult<ScalarValue> {
        Ok(ScalarValue::Binary(Some(
            bincode::serialize(&self.hll).map_err(|e| {
                DataFusionError::Internal(format!("Failed to serialize HyperLogLog: {}", e))
            })?,
        )))
    }

    fn size(&self) -> usize {
        std::mem::size_of_val(&self.hll)
    }

    fn state(&mut self) -> DfResult<Vec<ScalarValue>> {
        Ok(vec![ScalarValue::Binary(Some(
            bincode::serialize(&self.hll).map_err(|e| {
                DataFusionError::Internal(format!("Failed to serialize HyperLogLog: {}", e))
            })?,
        ))])
    }

    fn merge_batch(&mut self, states: &[ArrayRef]) -> DfResult<()> {
        let array = &states[0];
        let binary_array = as_binary_array(array)?;
        for v in binary_array.iter().flatten() {
            self.merge(v);
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use datafusion::arrow::array::{BinaryArray, StringArray};

    use super::*;

    #[test]
    fn test_hll_basic() {
        let mut state = HllState::new();
        state.update("1");
        state.update("2");
        state.update("3");

        let result = state.evaluate().unwrap();
        if let ScalarValue::Binary(Some(bytes)) = result {
            let mut hll: HllStateType = bincode::deserialize(&bytes).unwrap();
            assert_eq!(hll.count().trunc() as u32, 3);
        } else {
            panic!("Expected binary scalar value");
        }
    }

    #[test]
    fn test_hll_roundtrip() {
        let mut state = HllState::new();
        state.update("1");
        state.update("2");

        // Serialize
        let serialized = state.evaluate().unwrap();

        // Create new state and merge the serialized data
        let mut new_state = HllState::new();
        if let ScalarValue::Binary(Some(bytes)) = &serialized {
            new_state.merge(bytes);

            // Verify the merged state matches original
            let result = new_state.evaluate().unwrap();
            if let ScalarValue::Binary(Some(new_bytes)) = result {
                let mut original: HllStateType = bincode::deserialize(bytes).unwrap();
                let mut merged: HllStateType = bincode::deserialize(&new_bytes).unwrap();
                assert_eq!(original.count(), merged.count());
            } else {
                panic!("Expected binary scalar value");
            }
        } else {
            panic!("Expected binary scalar value");
        }
    }

    #[test]
    fn test_hll_batch_update() {
        let mut state = HllState::new();

        // Test string values
        let str_values = vec!["a", "b", "c", "d", "e", "f", "g", "h", "i"];
        let str_array = Arc::new(StringArray::from(str_values)) as ArrayRef;
        state.update_batch(&[str_array]).unwrap();

        let result = state.evaluate().unwrap();
        if let ScalarValue::Binary(Some(bytes)) = result {
            let mut hll: HllStateType = bincode::deserialize(&bytes).unwrap();
            assert_eq!(hll.count().trunc() as u32, 9);
        } else {
            panic!("Expected binary scalar value");
        }
    }

    #[test]
    fn test_hll_merge_batch() {
        let mut state1 = HllState::new();
        state1.update("1");
        let state1_binary = state1.evaluate().unwrap();

        let mut state2 = HllState::new();
        state2.update("2");
        let state2_binary = state2.evaluate().unwrap();

        let mut merged_state = HllState::new();
        if let (ScalarValue::Binary(Some(bytes1)), ScalarValue::Binary(Some(bytes2))) =
            (&state1_binary, &state2_binary)
        {
            let binary_array = Arc::new(BinaryArray::from(vec![
                bytes1.as_slice(),
                bytes2.as_slice(),
            ])) as ArrayRef;
            merged_state.merge_batch(&[binary_array]).unwrap();

            let result = merged_state.evaluate().unwrap();
            if let ScalarValue::Binary(Some(bytes)) = result {
                let mut hll: HllStateType = bincode::deserialize(&bytes).unwrap();
                assert_eq!(hll.count().trunc() as u32, 2);
            } else {
                panic!("Expected binary scalar value");
            }
        } else {
            panic!("Expected binary scalar values");
        }
    }

    #[test]
    fn test_hll_merge_function() {
        // Create two HLL states with different values
        let mut state1 = HllState::new();
        state1.update("1");
        state1.update("2");
        let state1_binary = state1.evaluate().unwrap();

        let mut state2 = HllState::new();
        state2.update("2");
        state2.update("3");
        let state2_binary = state2.evaluate().unwrap();

        // Create a merge state and merge both states
        let mut merge_state = HllState::new();
        if let (ScalarValue::Binary(Some(bytes1)), ScalarValue::Binary(Some(bytes2))) =
            (&state1_binary, &state2_binary)
        {
            let binary_array = Arc::new(BinaryArray::from(vec![
                bytes1.as_slice(),
                bytes2.as_slice(),
            ])) as ArrayRef;
            merge_state.update_batch(&[binary_array]).unwrap();

            let result = merge_state.evaluate().unwrap();
            if let ScalarValue::Binary(Some(bytes)) = result {
                let mut hll: HllStateType = bincode::deserialize(&bytes).unwrap();
                // Should have 3 unique values: "1", "2", "3"
                assert_eq!(hll.count().trunc() as u32, 3);
            } else {
                panic!("Expected binary scalar value");
            }
        } else {
            panic!("Expected binary scalar values");
        }
    }
}
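A standalone sketch of the HyperLogLog++ behaviour the `hll`/`hll_merge` accumulators above rely on, assuming the same `hyperloglogplus` 0.4 and `ahash` crates; it only illustrates insert/merge/count and is not the UDAF itself:

```rust
use hyperloglogplus::{HyperLogLog, HyperLogLogPlus};

fn main() {
    // Fixed seeds keep hashes stable, mirroring the FixedRandomState added by this change.
    let build_a = ahash::RandomState::with_seeds(1, 2, 3, 4);
    let build_b = ahash::RandomState::with_seeds(1, 2, 3, 4);

    // Precision 14 matches DEFAULT_PRECISION above.
    let mut a: HyperLogLogPlus<String, _> = HyperLogLogPlus::new(14, build_a).unwrap();
    let mut b: HyperLogLogPlus<String, _> = HyperLogLogPlus::new(14, build_b).unwrap();

    for v in ["1", "2", "3"] {
        a.insert(&v.to_string());
    }
    for v in ["3", "4"] {
        b.insert(&v.to_string());
    }

    // Merging two sketches approximates the cardinality of the union of their inputs.
    a.merge(&b).unwrap();
    assert_eq!(a.count().trunc() as u32, 4);
}
```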
@@ -22,6 +22,7 @@ use crate::function::{AsyncFunctionRef, FunctionRef};
 use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
 use crate::scalars::date::DateFunction;
 use crate::scalars::expression::ExpressionFunction;
+use crate::scalars::hll_count::HllCalcFunction;
 use crate::scalars::json::JsonFunction;
 use crate::scalars::matches::MatchesFunction;
 use crate::scalars::math::MathFunction;
@@ -107,6 +108,7 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
     DateFunction::register(&function_registry);
     ExpressionFunction::register(&function_registry);
     UddSketchCalcFunction::register(&function_registry);
+    HllCalcFunction::register(&function_registry);

     // Aggregate functions
     AggregateFunctions::register(&function_registry);
@@ -22,6 +22,7 @@ pub mod matches;
 pub mod math;
 pub mod vector;

+pub(crate) mod hll_count;
 #[cfg(test)]
 pub(crate) mod test;
 pub(crate) mod timestamp;
175 src/common/function/src/scalars/hll_count.rs Normal file
@@ -0,0 +1,175 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implementation of the scalar function `hll_count`.

use std::fmt;
use std::fmt::Display;
use std::sync::Arc;

use common_query::error::{DowncastVectorSnafu, InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, Volatility};
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::Vector;
use datatypes::scalars::{ScalarVector, ScalarVectorBuilder};
use datatypes::vectors::{BinaryVector, MutableVector, UInt64VectorBuilder, VectorRef};
use hyperloglogplus::HyperLogLog;
use snafu::OptionExt;

use crate::aggr::HllStateType;
use crate::function::{Function, FunctionContext};
use crate::function_registry::FunctionRegistry;

const NAME: &str = "hll_count";

/// HllCalcFunction implements the scalar function `hll_count`.
///
/// It accepts one argument:
/// 1. The serialized HyperLogLogPlus state, as produced by the aggregator (binary).
///
/// For each row, it deserializes the sketch and returns the estimated cardinality.
#[derive(Debug, Default)]
pub struct HllCalcFunction;

impl HllCalcFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register(Arc::new(HllCalcFunction));
    }
}

impl Display for HllCalcFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", NAME.to_ascii_uppercase())
    }
}

impl Function for HllCalcFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::uint64_datatype())
    }

    fn signature(&self) -> Signature {
        // Only argument: HyperLogLogPlus state (binary)
        Signature::exact(
            vec![ConcreteDataType::binary_datatype()],
            Volatility::Immutable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        if columns.len() != 1 {
            return InvalidFuncArgsSnafu {
                err_msg: format!("hll_count expects 1 argument, got {}", columns.len()),
            }
            .fail();
        }

        let hll_vec = columns[0]
            .as_any()
            .downcast_ref::<BinaryVector>()
            .with_context(|| DowncastVectorSnafu {
                err_msg: format!("expect BinaryVector, got {}", columns[0].vector_type_name()),
            })?;
        let len = hll_vec.len();
        let mut builder = UInt64VectorBuilder::with_capacity(len);

        for i in 0..len {
            let hll_opt = hll_vec.get_data(i);

            if hll_opt.is_none() {
                builder.push_null();
                continue;
            }

            let hll_bytes = hll_opt.unwrap();

            // Deserialize the HyperLogLogPlus from its bincode representation
            let mut hll: HllStateType = match bincode::deserialize(hll_bytes) {
                Ok(h) => h,
                Err(e) => {
                    common_telemetry::trace!("Failed to deserialize HyperLogLogPlus: {}", e);
                    builder.push_null();
                    continue;
                }
            };

            builder.push(Some(hll.count().round() as u64));
        }

        Ok(builder.to_vector())
    }
}

#[cfg(test)]
mod tests {
    use datatypes::vectors::BinaryVector;

    use super::*;
    use crate::utils::FixedRandomState;

    #[test]
    fn test_hll_count_function() {
        let function = HllCalcFunction;
        assert_eq!("hll_count", function.name());
        assert_eq!(
            ConcreteDataType::uint64_datatype(),
            function
                .return_type(&[ConcreteDataType::uint64_datatype()])
                .unwrap()
        );

        // Create a test HLL
        let mut hll = HllStateType::new(14, FixedRandomState::new()).unwrap();
        for i in 1..=10 {
            hll.insert(&i.to_string());
        }

        let serialized_bytes = bincode::serialize(&hll).unwrap();
        let args: Vec<VectorRef> = vec![Arc::new(BinaryVector::from(vec![Some(serialized_bytes)]))];

        let result = function.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(result.len(), 1);

        // Test cardinality estimate
        if let datatypes::value::Value::UInt64(v) = result.get(0) {
            assert_eq!(v, 10);
        } else {
            panic!("Expected uint64 value");
        }
    }

    #[test]
    fn test_hll_count_function_errors() {
        let function = HllCalcFunction;

        // Test with invalid number of arguments
        let args: Vec<VectorRef> = vec![];
        let result = function.eval(FunctionContext::default(), &args);
        assert!(result.is_err());
        assert!(result
            .unwrap_err()
            .to_string()
            .contains("hll_count expects 1 argument"));

        // Test with invalid binary data
        let args: Vec<VectorRef> = vec![Arc::new(BinaryVector::from(vec![Some(vec![1, 2, 3])]))]; // Invalid binary data
        let result = function.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(result.len(), 1);
        assert!(matches!(result.get(0), datatypes::value::Value::Null));
    }
}
@@ -12,6 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::hash::BuildHasher;
+
+use ahash::RandomState;
+use serde::{Deserialize, Serialize};
+
 /// Escapes special characters in the provided pattern string for `LIKE`.
 ///
 /// Specifically, it prefixes the backslash (`\`), percent (`%`), and underscore (`_`)
@@ -32,6 +37,71 @@ pub fn escape_like_pattern(pattern: &str) -> String {
         })
         .collect::<String>()
 }
+
+/// A random state with fixed seeds.
+///
+/// This is used to ensure that the hash values are consistent across
+/// different processes, and easy to serialize and deserialize.
+#[derive(Debug)]
+pub struct FixedRandomState {
+    state: RandomState,
+}
+
+impl FixedRandomState {
+    // some random seeds
+    const RANDOM_SEED_0: u64 = 0x517cc1b727220a95;
+    const RANDOM_SEED_1: u64 = 0x428a2f98d728ae22;
+    const RANDOM_SEED_2: u64 = 0x7137449123ef65cd;
+    const RANDOM_SEED_3: u64 = 0xb5c0fbcfec4d3b2f;
+
+    pub fn new() -> Self {
+        Self {
+            state: ahash::RandomState::with_seeds(
+                Self::RANDOM_SEED_0,
+                Self::RANDOM_SEED_1,
+                Self::RANDOM_SEED_2,
+                Self::RANDOM_SEED_3,
+            ),
+        }
+    }
+}
+
+impl Default for FixedRandomState {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl BuildHasher for FixedRandomState {
+    type Hasher = ahash::AHasher;
+
+    fn build_hasher(&self) -> Self::Hasher {
+        self.state.build_hasher()
+    }
+
+    fn hash_one<T: std::hash::Hash>(&self, x: T) -> u64 {
+        self.state.hash_one(x)
+    }
+}
+
+impl Serialize for FixedRandomState {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        serializer.serialize_unit()
+    }
+}
+
+impl<'de> Deserialize<'de> for FixedRandomState {
+    fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        Ok(Self::new())
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
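A small self-contained sketch (not part of the change) of why the fixed seeds above matter: two independently built ahash states with the same seeds hash a value identically, which is what lets `Deserialize` simply return `Self::new()` and still agree with the sketch that was serialized:

```rust
use std::hash::BuildHasher;

fn main() {
    let a = ahash::RandomState::with_seeds(1, 2, 3, 4);
    let b = ahash::RandomState::with_seeds(1, 2, 3, 4);

    // Same seeds, same hash: values can be re-hashed consistently after deserialization.
    assert_eq!(a.hash_one("greptime"), b.hash_one("greptime"));

    // A default RandomState is randomly seeded, so it generally will not agree.
    let c = ahash::RandomState::new();
    println!("fixed: {}, random: {}", a.hash_one("greptime"), c.hash_one("greptime"));
}
```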
@@ -445,10 +445,16 @@ impl Pool {

     async fn recycle_channel_in_loop(pool: Arc<Pool>, interval_secs: u64) {
         let mut interval = tokio::time::interval(Duration::from_secs(interval_secs));

+        // use weak ref here to prevent pool being leaked
+        let pool_weak = Arc::downgrade(&pool);
         loop {
             let _ = interval.tick().await;
-            pool.retain_channel(|_, c| c.access.swap(0, Ordering::Relaxed) != 0)
+            if let Some(pool) = pool_weak.upgrade() {
+                pool.retain_channel(|_, c| c.access.swap(0, Ordering::Relaxed) != 0)
+            } else {
+                // no one is using this pool, so we can also let go
+                break;
+            }
         }
     }

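A generic sketch of the weak-reference pattern applied above, with a stand-in `Pool` type (hypothetical, not the client's real pool): the background task only upgrades a `Weak` each tick, so dropping the last `Arc` lets both the pool and the task go away instead of leaking:

```rust
use std::sync::{Arc, Weak};
use std::time::Duration;

struct Pool;

impl Pool {
    fn recycle(&self) {
        println!("recycling idle channels");
    }
}

async fn recycle_in_loop(pool: Weak<Pool>, interval_secs: u64) {
    let mut interval = tokio::time::interval(Duration::from_secs(interval_secs));
    loop {
        interval.tick().await;
        match pool.upgrade() {
            Some(pool) => pool.recycle(),
            // No strong references left: stop the task instead of keeping the pool alive.
            None => break,
        }
    }
}

#[tokio::main]
async fn main() {
    let pool = Arc::new(Pool);
    let task = tokio::spawn(recycle_in_loop(Arc::downgrade(&pool), 1));
    drop(pool); // the loop observes this on its next tick and exits
    let _ = task.await;
}
```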
@@ -57,12 +57,10 @@ pub trait ClusterInfo {
 }

 /// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-{cluster_id}-{role}-{node_id}`.
-///
-/// This key cannot be used to describe the `Metasrv` because the `Metasrv` does not have
-/// a `cluster_id`, it serves multiple clusters.
 #[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
 pub struct NodeInfoKey {
     /// The cluster id.
+    // todo(hl): remove cluster_id as it is not assigned anywhere.
     pub cluster_id: ClusterId,
     /// The role of the node. It can be `[Role::Datanode]` or `[Role::Frontend]`.
     pub role: Role,
@@ -232,8 +230,8 @@ impl TryFrom<Vec<u8>> for NodeInfoKey {
     }
 }

-impl From<NodeInfoKey> for Vec<u8> {
-    fn from(key: NodeInfoKey) -> Self {
+impl From<&NodeInfoKey> for Vec<u8> {
+    fn from(key: &NodeInfoKey) -> Self {
         format!(
             "{}-{}-{}-{}",
             CLUSTER_NODE_INFO_PREFIX,
@@ -315,7 +313,7 @@ mod tests {
             node_id: 2,
         };

-        let key_bytes: Vec<u8> = key.into();
+        let key_bytes: Vec<u8> = (&key).into();
         let new_key: NodeInfoKey = key_bytes.try_into().unwrap();

         assert_eq!(1, new_key.cluster_id);
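A reduced sketch of the borrowed-conversion change above (the key type here is illustrative, not the real `NodeInfoKey`): implementing `From<&K> for Vec<u8>` lets callers encode a key without giving up ownership, which is handy when the key is still needed afterwards, for example for logging:

```rust
struct NodeKey {
    role: u8,
    node_id: u64,
}

impl From<&NodeKey> for Vec<u8> {
    fn from(key: &NodeKey) -> Self {
        format!("__meta_node_info-{}-{}", key.role, key.node_id).into_bytes()
    }
}

fn main() {
    let key = NodeKey { role: 1, node_id: 42 };
    let bytes: Vec<u8> = (&key).into();
    // `key` is still usable after the conversion.
    println!("encoded {} bytes for node {}", bytes.len(), key.node_id);
}
```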
@@ -343,6 +343,7 @@ pub enum FlowType {
 impl FlowType {
     pub const RECORDING_RULE: &str = "recording_rule";
     pub const STREAMING: &str = "streaming";
+    pub const FLOW_TYPE_KEY: &str = "flow_type";
 }

 impl Default for FlowType {
@@ -398,7 +399,8 @@ impl From<&CreateFlowData> for CreateRequest {
         };

         let flow_type = value.flow_type.unwrap_or_default().to_string();
-        req.flow_options.insert("flow_type".to_string(), flow_type);
+        req.flow_options
+            .insert(FlowType::FLOW_TYPE_KEY.to_string(), flow_type);
         req
     }
 }
@@ -430,7 +432,7 @@ impl From<&CreateFlowData> for (FlowInfoValue, Vec<(FlowPartitionId, FlowRouteVa
             .collect::<Vec<_>>();

         let flow_type = value.flow_type.unwrap_or_default().to_string();
-        options.insert("flow_type".to_string(), flow_type);
+        options.insert(FlowType::FLOW_TYPE_KEY.to_string(), flow_type);

         let flow_info = FlowInfoValue {
             source_table_ids: value.source_table_ids.clone(),
@@ -128,7 +128,7 @@ impl State for DropDatabaseExecutor {
             .await?;
         executor.invalidate_table_cache(ddl_ctx).await?;
         executor
-            .on_drop_regions(ddl_ctx, &self.physical_region_routes)
+            .on_drop_regions(ddl_ctx, &self.physical_region_routes, true)
             .await?;
         info!("Table: {}({}) is dropped", self.table_name, self.table_id);

@@ -156,7 +156,7 @@ impl DropTableProcedure {

     pub async fn on_datanode_drop_regions(&mut self) -> Result<Status> {
         self.executor
-            .on_drop_regions(&self.context, &self.data.physical_region_routes)
+            .on_drop_regions(&self.context, &self.data.physical_region_routes, false)
             .await?;
         self.data.state = DropTableState::DeleteTombstone;
         Ok(Status::executing(true))

@@ -214,6 +214,7 @@ impl DropTableExecutor {
         &self,
         ctx: &DdlContext,
         region_routes: &[RegionRoute],
+        fast_path: bool,
     ) -> Result<()> {
         let leaders = find_leaders(region_routes);
         let mut drop_region_tasks = Vec::with_capacity(leaders.len());
@@ -236,6 +237,7 @@ impl DropTableExecutor {
                 }),
                 body: Some(region_request::Body::Drop(PbDropRegionRequest {
                     region_id: region_id.as_u64(),
+                    fast_path,
                 })),
             };
             let datanode = datanode.clone();
@@ -34,6 +34,7 @@ pub mod kv_backend;
 pub mod leadership_notifier;
 pub mod lock_key;
 pub mod metrics;
+pub mod node_expiry_listener;
 pub mod node_manager;
 pub mod peer;
 pub mod range_stream;
152 src/common/meta/src/node_expiry_listener.rs Normal file
@@ -0,0 +1,152 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Mutex;
use std::time::Duration;

use common_telemetry::{debug, error, info, warn};
use tokio::task::JoinHandle;
use tokio::time::{interval, MissedTickBehavior};

use crate::cluster::{NodeInfo, NodeInfoKey};
use crate::error;
use crate::kv_backend::ResettableKvBackendRef;
use crate::leadership_notifier::LeadershipChangeListener;
use crate::rpc::store::RangeRequest;
use crate::rpc::KeyValue;

/// [NodeExpiryListener] periodically checks all node info in memory and removes
/// expired node info to prevent memory leak.
pub struct NodeExpiryListener {
    handle: Mutex<Option<JoinHandle<()>>>,
    max_idle_time: Duration,
    in_memory: ResettableKvBackendRef,
}

impl Drop for NodeExpiryListener {
    fn drop(&mut self) {
        self.stop();
    }
}

impl NodeExpiryListener {
    pub fn new(max_idle_time: Duration, in_memory: ResettableKvBackendRef) -> Self {
        Self {
            handle: Mutex::new(None),
            max_idle_time,
            in_memory,
        }
    }

    async fn start(&self) {
        let mut handle = self.handle.lock().unwrap();
        if handle.is_none() {
            let in_memory = self.in_memory.clone();

            let max_idle_time = self.max_idle_time;
            let ticker_loop = tokio::spawn(async move {
                // Run clean task every minute.
                let mut interval = interval(Duration::from_secs(60));
                interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
                loop {
                    interval.tick().await;
                    if let Err(e) = Self::clean_expired_nodes(&in_memory, max_idle_time).await {
                        error!(e; "Failed to clean expired node");
                    }
                }
            });
            *handle = Some(ticker_loop);
        }
    }

    fn stop(&self) {
        if let Some(handle) = self.handle.lock().unwrap().take() {
            handle.abort();
            info!("Node expiry listener stopped")
        }
    }

    /// Cleans expired nodes from memory.
    async fn clean_expired_nodes(
        in_memory: &ResettableKvBackendRef,
        max_idle_time: Duration,
    ) -> error::Result<()> {
        let node_keys = Self::list_expired_nodes(in_memory, max_idle_time).await?;
        for key in node_keys {
            let key_bytes: Vec<u8> = (&key).into();
            if let Err(e) = in_memory.delete(&key_bytes, false).await {
                warn!(e; "Failed to delete expired node: {:?}", key_bytes);
            } else {
                debug!("Deleted expired node key: {:?}", key);
            }
        }
        Ok(())
    }

    /// Lists expired nodes that have been inactive more than `max_idle_time`.
    async fn list_expired_nodes(
        in_memory: &ResettableKvBackendRef,
        max_idle_time: Duration,
    ) -> error::Result<impl Iterator<Item = NodeInfoKey>> {
        let prefix = NodeInfoKey::key_prefix_with_cluster_id(0);
        let req = RangeRequest::new().with_prefix(prefix);
        let current_time_millis = common_time::util::current_time_millis();
        let resp = in_memory.range(req).await?;
        Ok(resp
            .kvs
            .into_iter()
            .filter_map(move |KeyValue { key, value }| {
                let Ok(info) = NodeInfo::try_from(value).inspect_err(|e| {
                    warn!(e; "Unrecognized node info value");
                }) else {
                    return None;
                };
                if (current_time_millis - info.last_activity_ts) > max_idle_time.as_millis() as i64
                {
                    NodeInfoKey::try_from(key)
                        .inspect_err(|e| {
                            warn!(e; "Unrecognized node info key: {:?}", info.peer);
                        })
                        .ok()
                        .inspect(|node_key| {
                            debug!("Found expired node: {:?}", node_key);
                        })
                } else {
                    None
                }
            }))
    }
}

#[async_trait::async_trait]
impl LeadershipChangeListener for NodeExpiryListener {
    fn name(&self) -> &str {
        "NodeExpiryListener"
    }

    async fn on_leader_start(&self) -> error::Result<()> {
        self.start().await;
        info!(
            "On leader start, node expiry listener started with max idle time: {:?}",
            self.max_idle_time
        );
        Ok(())
    }

    async fn on_leader_stop(&self) -> error::Result<()> {
        self.stop();
        info!("On leader stop, node expiry listener stopped");
        Ok(())
    }
}
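A minimal sketch of the scheduling pattern `NodeExpiryListener` uses, with a dummy sweep instead of the real kv-backend scan: `MissedTickBehavior::Skip` avoids a burst of catch-up runs after a stall, and `JoinHandle::abort` stops the task when leadership is lost:

```rust
use std::time::Duration;

use tokio::time::{interval, MissedTickBehavior};

#[tokio::main]
async fn main() {
    let handle = tokio::spawn(async {
        let mut ticker = interval(Duration::from_millis(100));
        ticker.set_missed_tick_behavior(MissedTickBehavior::Skip);
        loop {
            ticker.tick().await;
            // Real code would scan the in-memory backend and delete expired node keys here.
            println!("running expiry sweep");
        }
    });

    tokio::time::sleep(Duration::from_millis(350)).await;
    // Equivalent of `stop()` on leader change: abort the background sweep.
    handle.abort();
    assert!(handle.await.unwrap_err().is_cancelled());
}
```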
@@ -1218,7 +1218,10 @@ mod tests {
         );

         let response = mock_region_server
-            .handle_request(region_id, RegionRequest::Drop(RegionDropRequest {}))
+            .handle_request(
+                region_id,
+                RegionRequest::Drop(RegionDropRequest { fast_path: false }),
+            )
             .await
             .unwrap();
         assert_eq!(response.affected_rows, 0);
@@ -1310,7 +1313,10 @@ mod tests {
             .insert(region_id, RegionEngineWithStatus::Ready(engine.clone()));

         mock_region_server
-            .handle_request(region_id, RegionRequest::Drop(RegionDropRequest {}))
+            .handle_request(
+                region_id,
+                RegionRequest::Drop(RegionDropRequest { fast_path: false }),
+            )
             .await
             .unwrap_err();
@@ -32,5 +32,5 @@ pub mod types;
 pub mod value;
 pub mod vectors;

-pub use arrow;
+pub use arrow::{self, compute};
 pub use error::{Error, Result};
@@ -16,6 +16,7 @@ async-trait.workspace = true
 bytes.workspace = true
 cache.workspace = true
 catalog.workspace = true
+chrono.workspace = true
 client.workspace = true
 common-base.workspace = true
 common-config.workspace = true
@@ -49,12 +49,13 @@ pub(crate) use crate::adapter::node_context::FlownodeContext;
 use crate::adapter::refill::RefillTask;
 use crate::adapter::table_source::ManagedTableSource;
 use crate::adapter::util::relation_desc_to_column_schemas_with_fallback;
-pub(crate) use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
+pub(crate) use crate::adapter::worker::{create_worker, WorkerHandle};
 use crate::compute::ErrCollector;
 use crate::df_optimizer::sql_to_flow_plan;
 use crate::error::{EvalSnafu, ExternalSnafu, InternalSnafu, InvalidQuerySnafu, UnexpectedSnafu};
 use crate::expr::Batch;
 use crate::metrics::{METRIC_FLOW_INSERT_ELAPSED, METRIC_FLOW_ROWS, METRIC_FLOW_RUN_INTERVAL_MS};
+use crate::recording_rules::RecordingRuleEngine;
 use crate::repr::{self, DiffRow, RelationDesc, Row, BATCH_SIZE};

 mod flownode_impl;
@@ -63,7 +64,7 @@ pub(crate) mod refill;
 mod stat;
 #[cfg(test)]
 mod tests;
-mod util;
+pub(crate) mod util;
 mod worker;

 pub(crate) mod node_context;
@@ -171,6 +172,8 @@ pub struct FlowWorkerManager {
     flush_lock: RwLock<()>,
     /// receive a oneshot sender to send state size report
     state_report_handler: RwLock<Option<StateReportHandler>>,
+    /// engine for recording rule
+    rule_engine: RecordingRuleEngine,
 }

 /// Building FlownodeManager
@@ -185,6 +188,7 @@ impl FlowWorkerManager {
         node_id: Option<u32>,
         query_engine: Arc<dyn QueryEngine>,
         table_meta: TableMetadataManagerRef,
+        rule_engine: RecordingRuleEngine,
     ) -> Self {
         let srv_map = ManagedTableSource::new(
             table_meta.table_info_manager().clone(),
@@ -207,6 +211,7 @@ impl FlowWorkerManager {
             node_id,
             flush_lock: RwLock::new(()),
             state_report_handler: RwLock::new(None),
+            rule_engine,
         }
     }

@@ -215,25 +220,6 @@ impl FlowWorkerManager {
         self
     }

-    /// Create a flownode manager with one worker
-    pub fn new_with_workers<'s>(
-        node_id: Option<u32>,
-        query_engine: Arc<dyn QueryEngine>,
-        table_meta: TableMetadataManagerRef,
-        num_workers: usize,
-    ) -> (Self, Vec<Worker<'s>>) {
-        let mut zelf = Self::new(node_id, query_engine, table_meta);
-
-        let workers: Vec<_> = (0..num_workers)
-            .map(|_| {
-                let (handle, worker) = create_worker();
-                zelf.add_worker_handle(handle);
-                worker
-            })
-            .collect();
-        (zelf, workers)
-    }
-
     /// add a worker handler to manager, meaning this corresponding worker is under it's manage
     pub fn add_worker_handle(&mut self, handle: WorkerHandle) {
         self.worker_handles.push(handle);
@@ -751,7 +737,11 @@ pub struct CreateFlowArgs {
 /// Create&Remove flow
 impl FlowWorkerManager {
     /// remove a flow by it's id
+    #[allow(unreachable_code)]
     pub async fn remove_flow(&self, flow_id: FlowId) -> Result<(), Error> {
+        // TODO(discord9): reroute some back to streaming engine later
+        return self.rule_engine.remove_flow(flow_id).await;
+
         for handle in self.worker_handles.iter() {
             if handle.contains_flow(flow_id).await? {
                 handle.remove_flow(flow_id).await?;
@@ -767,8 +757,10 @@ impl FlowWorkerManager {
     /// steps to create task:
     /// 1. parse query into typed plan(and optional parse expire_after expr)
     /// 2. render source/sink with output table id and used input table id
-    #[allow(clippy::too_many_arguments)]
+    #[allow(clippy::too_many_arguments, unreachable_code)]
     pub async fn create_flow(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
+        // TODO(discord9): reroute some back to streaming engine later
+        return self.rule_engine.create_flow(args).await;
         let CreateFlowArgs {
             flow_id,
             sink_table_name,
@@ -153,7 +153,13 @@ impl Flownode for FlowWorkerManager {
         }
     }

+    #[allow(unreachable_code, unused)]
     async fn handle_inserts(&self, request: InsertRequests) -> Result<FlowResponse> {
+        return self
+            .rule_engine
+            .handle_inserts(request)
+            .await
+            .map_err(to_meta_err(snafu::location!()));
         // using try_read to ensure two things:
         // 1. flush wouldn't happen until inserts before it is inserted
         // 2. inserts happening concurrently with flush wouldn't be block by flush
@@ -206,15 +212,15 @@ impl Flownode for FlowWorkerManager {
             .collect_vec();
         let table_col_names = table_schema.relation_desc.names;
         let table_col_names = table_col_names
             .iter().enumerate()
             .map(|(idx,name)| match name {
                 Some(name) => Ok(name.clone()),
                 None => InternalSnafu {
                     reason: format!("Expect column {idx} of table id={table_id} to have name in table schema, found None"),
                 }
                 .fail().map_err(BoxedError::new).context(ExternalSnafu),
             })
             .collect::<Result<Vec<_>>>()?;
         let name_to_col = HashMap::<_, _>::from_iter(
             insert_schema
                 .iter()
@@ -12,6 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//! Some utility functions
+
 use std::sync::Arc;

 use api::helper::ColumnDataTypeWrapper;
@@ -16,6 +16,7 @@

 use std::any::Any;

+use arrow_schema::ArrowError;
 use common_error::ext::BoxedError;
 use common_error::{define_into_tonic_status, from_err_code_msg_to_header};
 use common_macro::stack_trace_debug;
@@ -53,6 +54,13 @@ pub enum Error {
         location: Location,
     },

+    #[snafu(display("Time error"))]
+    Time {
+        source: common_time::error::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
     #[snafu(display("External error"))]
     External {
         source: BoxedError,
@@ -156,6 +164,15 @@ pub enum Error {
         location: Location,
     },

+    #[snafu(display("Arrow error: {raw:?} in context: {context}"))]
+    Arrow {
+        #[snafu(source)]
+        raw: ArrowError,
+        context: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
     #[snafu(display("Datafusion error: {raw:?} in context: {context}"))]
     Datafusion {
         #[snafu(source)]
@@ -230,6 +247,7 @@ impl ErrorExt for Error {
         match self {
             Self::Eval { .. }
             | Self::JoinTask { .. }
+            | Self::Arrow { .. }
             | Self::Datafusion { .. }
             | Self::InsertIntoFlow { .. } => StatusCode::Internal,
             Self::FlowAlreadyExist { .. } => StatusCode::TableAlreadyExists,
@@ -238,7 +256,9 @@ impl ErrorExt for Error {
             | Self::FlowNotFound { .. }
             | Self::ListFlows { .. } => StatusCode::TableNotFound,
             Self::Plan { .. } | Self::Datatypes { .. } => StatusCode::PlanQuery,
-            Self::InvalidQuery { .. } | Self::CreateFlow { .. } => StatusCode::EngineExecuteQuery,
+            Self::InvalidQuery { .. } | Self::CreateFlow { .. } | Self::Time { .. } => {
+                StatusCode::EngineExecuteQuery
+            }
             Self::Unexpected { .. } => StatusCode::Unexpected,
             Self::NotImplemented { .. } | Self::UnsupportedTemporalFilter { .. } => {
                 StatusCode::Unsupported
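A tiny sketch of the snafu pattern the new `Arrow` and `Time` variants above follow, using a standalone enum rather than flow's real `Error` type; the crates assumed are `snafu` and `arrow_schema`, both already used in this diff:

```rust
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum DemoError {
    // A field named `source` is wired in as the error source automatically;
    // extra fields such as `context` are supplied at the call site.
    #[snafu(display("Arrow error in context: {context}"))]
    Arrow {
        source: arrow_schema::ArrowError,
        context: String,
    },
}

fn parse_field() -> Result<(), DemoError> {
    Err(arrow_schema::ArrowError::ParseError("bad field".to_string())).context(ArrowSnafu {
        context: "decoding a record batch".to_string(),
    })
}

fn main() {
    println!("{}", parse_field().unwrap_err());
}
```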
@@ -238,6 +238,7 @@ mod test {

         for (sql, current, expected) in &testcases {
             let plan = sql_to_substrait(engine.clone(), sql).await;

+            let mut ctx = create_test_ctx();
             let flow_plan = TypedPlan::from_substrait_plan(&mut ctx, &plan)
                 .await
@@ -103,6 +103,11 @@ impl HeartbeatTask {
             warn!("Heartbeat task started multiple times");
             return Ok(());
         }

+        self.create_streams().await
+    }
+
+    async fn create_streams(&self) -> Result<(), Error> {
         info!("Start to establish the heartbeat connection to metasrv.");
         let (req_sender, resp_stream) = self
             .meta_client
@@ -125,13 +130,6 @@ impl HeartbeatTask {

     pub fn shutdown(&self) {
         info!("Close heartbeat task for flownode");
-        if self
-            .running
-            .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
-            .is_err()
-        {
-            warn!("Call close heartbeat task multiple times");
-        }
     }

     fn new_heartbeat_request(
@@ -231,6 +229,8 @@ impl HeartbeatTask {
                 // set the timeout to half of the report interval so that it wouldn't delay heartbeat if something went horribly wrong
                 latest_report = query_flow_state(&query_stat_size, report_interval / 2).await;
             }
+
+            info!("flownode heartbeat task stopped.");
         });
     }

@@ -274,7 +274,7 @@ impl HeartbeatTask {

             info!("Try to re-establish the heartbeat connection to metasrv.");

-            if self.start().await.is_ok() {
+            if self.create_streams().await.is_ok() {
                 break;
             }
@@ -33,6 +33,7 @@ mod expr;
 pub mod heartbeat;
 mod metrics;
 mod plan;
+mod recording_rules;
 mod repr;
 mod server;
 mod transform;
@@ -43,4 +44,5 @@ mod test_utils;

 pub use adapter::{FlowConfig, FlowWorkerManager, FlowWorkerManagerRef, FlownodeOptions};
 pub use error::{Error, Result};
+pub use recording_rules::FrontendClient;
 pub use server::{FlownodeBuilder, FlownodeInstance, FlownodeServer, FrontendInvoker};
@@ -28,6 +28,32 @@ lazy_static! {
         &["table_id"]
     )
     .unwrap();
+    pub static ref METRIC_FLOW_RULE_ENGINE_QUERY_TIME: HistogramVec = register_histogram_vec!(
+        "greptime_flow_rule_engine_query_time",
+        "flow rule engine query time",
+        &["flow_id"],
+        vec![
+            0.0,
+            1.,
+            3.,
+            5.,
+            10.,
+            20.,
+            30.,
+            60.,
+            2. * 60.,
+            5. * 60.,
+            10. * 60.
+        ]
+    )
+    .unwrap();
+    pub static ref METRIC_FLOW_RULE_ENGINE_SLOW_QUERY: HistogramVec = register_histogram_vec!(
+        "greptime_flow_rule_engine_slow_query",
+        "flow rule engine slow query",
+        &["flow_id", "sql", "peer"],
+        vec![60., 2. * 60., 3. * 60., 5. * 60., 10. * 60.]
+    )
+    .unwrap();
     pub static ref METRIC_FLOW_RUN_INTERVAL_MS: IntGauge =
         register_int_gauge!("greptime_flow_run_interval_ms", "flow run interval in ms").unwrap();
     pub static ref METRIC_FLOW_ROWS: IntCounterVec = register_int_counter_vec!(
940
src/flow/src/recording_rules.rs
Normal file
@@ -0,0 +1,940 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Run a flow as a recording rule: a time-window-aware normal query triggered on every tick configured by the user
|
||||
|
||||
mod engine;
|
||||
mod frontend_client;
|
||||
|
||||
use std::collections::BTreeSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::helper::pb_value_to_value_ref;
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::DfRecordBatch;
|
||||
use common_telemetry::warn;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use common_time::Timestamp;
|
||||
use datafusion::error::Result as DfResult;
|
||||
use datafusion::logical_expr::Expr;
|
||||
use datafusion::physical_planner::{DefaultPhysicalPlanner, PhysicalPlanner};
|
||||
use datafusion::prelude::SessionContext;
|
||||
use datafusion::sql::unparser::Unparser;
|
||||
use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRecursion, TreeNodeRewriter};
|
||||
use datafusion_common::{DFSchema, TableReference};
|
||||
use datafusion_expr::{ColumnarValue, LogicalPlan};
|
||||
use datafusion_physical_expr::PhysicalExprRef;
|
||||
use datatypes::prelude::{ConcreteDataType, DataType};
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::schema::TIME_INDEX_KEY;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector,
|
||||
TimestampSecondVector, Vector,
|
||||
};
|
||||
pub use engine::RecordingRuleEngine;
|
||||
pub use frontend_client::FrontendClient;
|
||||
use itertools::Itertools;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use query::QueryEngineRef;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
|
||||
use crate::adapter::util::from_proto_to_data_type;
|
||||
use crate::df_optimizer::apply_df_optimizer;
|
||||
use crate::error::{ArrowSnafu, DatafusionSnafu, DatatypesSnafu, ExternalSnafu, UnexpectedSnafu};
|
||||
use crate::expr::error::DataTypeSnafu;
|
||||
use crate::Error;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct TimeWindowExpr {
|
||||
phy_expr: PhysicalExprRef,
|
||||
column_name: String,
|
||||
logical_expr: Expr,
|
||||
df_schema: DFSchema,
|
||||
}
|
||||
|
||||
impl TimeWindowExpr {
|
||||
pub fn from_expr(expr: &Expr, column_name: &str, df_schema: &DFSchema) -> Result<Self, Error> {
|
||||
let phy_planner = DefaultPhysicalPlanner::default();
|
||||
|
||||
let phy_expr: PhysicalExprRef = phy_planner
|
||||
.create_physical_expr(expr, df_schema, &SessionContext::new().state())
|
||||
.with_context(|_e| DatafusionSnafu {
|
||||
context: format!(
|
||||
"Failed to create physical expression from {expr:?} using {df_schema:?}"
|
||||
),
|
||||
})?;
|
||||
Ok(Self {
|
||||
phy_expr,
|
||||
column_name: column_name.to_string(),
|
||||
logical_expr: expr.clone(),
|
||||
df_schema: df_schema.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn eval(
|
||||
&self,
|
||||
current: Timestamp,
|
||||
) -> Result<(Option<Timestamp>, Option<Timestamp>), Error> {
|
||||
let lower_bound =
|
||||
find_expr_time_window_lower_bound(&self.logical_expr, &self.df_schema, current)?;
|
||||
let upper_bound =
|
||||
find_expr_time_window_upper_bound(&self.logical_expr, &self.df_schema, current)?;
|
||||
Ok((lower_bound, upper_bound))
|
||||
}
|
||||
|
||||
/// Find timestamps from rows using time window expr
|
||||
pub async fn handle_rows(
|
||||
&self,
|
||||
rows_list: Vec<api::v1::Rows>,
|
||||
) -> Result<BTreeSet<Timestamp>, Error> {
|
||||
let mut time_windows = BTreeSet::new();
|
||||
|
||||
for rows in rows_list {
|
||||
// pick the time index column and evaluate `self.phy_expr` on it
|
||||
let ts_col_index = rows
|
||||
.schema
|
||||
.iter()
|
||||
.map(|col| col.column_name.clone())
|
||||
.position(|name| name == self.column_name);
|
||||
let Some(ts_col_index) = ts_col_index else {
|
||||
warn!("can't found time index column in schema: {:?}", rows.schema);
|
||||
continue;
|
||||
};
|
||||
let col_schema = &rows.schema[ts_col_index];
|
||||
let cdt = from_proto_to_data_type(col_schema)?;
|
||||
|
||||
let column_values = rows
|
||||
.rows
|
||||
.iter()
|
||||
.map(|row| &row.values[ts_col_index])
|
||||
.collect_vec();
|
||||
|
||||
let mut vector = cdt.create_mutable_vector(column_values.len());
|
||||
for value in column_values {
|
||||
let value = pb_value_to_value_ref(value, &None);
|
||||
vector.try_push_value_ref(value).context(DataTypeSnafu {
|
||||
msg: "Failed to convert rows to columns",
|
||||
})?;
|
||||
}
|
||||
let vector = vector.to_vector();
|
||||
|
||||
let df_schema = create_df_schema_for_ts_column(&self.column_name, cdt)?;
|
||||
|
||||
let rb =
|
||||
DfRecordBatch::try_new(df_schema.inner().clone(), vec![vector.to_arrow_array()])
|
||||
.with_context(|_e| ArrowSnafu {
|
||||
context: format!(
|
||||
"Failed to create record batch from {df_schema:?} and {vector:?}"
|
||||
),
|
||||
})?;
|
||||
|
||||
let eval_res = self
|
||||
.phy_expr
|
||||
.evaluate(&rb)
|
||||
.with_context(|_| DatafusionSnafu {
|
||||
context: format!(
|
||||
"Failed to evaluate physical expression {:?} on {rb:?}",
|
||||
self.phy_expr
|
||||
),
|
||||
})?;
|
||||
|
||||
let res = columnar_to_ts_vector(&eval_res)?;
|
||||
|
||||
for ts in res.into_iter().flatten() {
|
||||
time_windows.insert(ts);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(time_windows)
|
||||
}
|
||||
}
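// --- Illustrative sketch (not part of the original change) ---
// A minimal example of how `TimeWindowExpr` is meant to be used; `window_expr` and `rows`
// are assumed to be built elsewhere (e.g. via `TimeWindowExpr::from_expr` and an incoming
// insert request), and the concrete timestamp is arbitrary.
async fn example_time_window_expr_usage(
    window_expr: &TimeWindowExpr,
    rows: api::v1::Rows,
) -> Result<(), Error> {
    // map "now" to the bounds of the time window it falls into
    let (lower, upper) = window_expr.eval(Timestamp::new_second(1_740_394_109))?;
    common_telemetry::info!("window bounds: {lower:?} .. {upper:?}");

    // collect the distinct window start timestamps touched by a batch of inserted rows
    let dirty_starts = window_expr.handle_rows(vec![rows]).await?;
    common_telemetry::info!("dirty window starts: {dirty_starts:?}");
    Ok(())
}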
|
||||
|
||||
fn create_df_schema_for_ts_column(name: &str, cdt: ConcreteDataType) -> Result<DFSchema, Error> {
|
||||
let arrow_schema = Arc::new(arrow_schema::Schema::new(vec![arrow_schema::Field::new(
|
||||
name,
|
||||
cdt.as_arrow_type(),
|
||||
false,
|
||||
)]));
|
||||
|
||||
let df_schema = DFSchema::from_field_specific_qualified_schema(
|
||||
vec![Some(TableReference::bare("TimeIndexOnlyTable"))],
|
||||
&arrow_schema,
|
||||
)
|
||||
.with_context(|_e| DatafusionSnafu {
|
||||
context: format!("Failed to create DFSchema from arrow schema {arrow_schema:?}"),
|
||||
})?;
|
||||
|
||||
Ok(df_schema)
|
||||
}
|
||||
|
||||
/// Convert `ColumnarValue` to `Vec<Option<Timestamp>>`
|
||||
fn columnar_to_ts_vector(columnar: &ColumnarValue) -> Result<Vec<Option<Timestamp>>, Error> {
|
||||
let val = match columnar {
|
||||
datafusion_expr::ColumnarValue::Array(array) => {
|
||||
let ty = array.data_type();
|
||||
let ty = ConcreteDataType::from_arrow_type(ty);
|
||||
let time_unit = if let ConcreteDataType::Timestamp(ty) = ty {
|
||||
ty.unit()
|
||||
} else {
|
||||
return UnexpectedSnafu {
|
||||
reason: format!("Non-timestamp type: {ty:?}"),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
|
||||
match time_unit {
|
||||
TimeUnit::Second => TimestampSecondVector::try_from_arrow_array(array.clone())
|
||||
.with_context(|_| DatatypesSnafu {
|
||||
extra: format!("Failed to create vector from arrow array {array:?}"),
|
||||
})?
|
||||
.iter_data()
|
||||
.map(|d| d.map(|d| d.0))
|
||||
.collect_vec(),
|
||||
TimeUnit::Millisecond => {
|
||||
TimestampMillisecondVector::try_from_arrow_array(array.clone())
|
||||
.with_context(|_| DatatypesSnafu {
|
||||
extra: format!("Failed to create vector from arrow array {array:?}"),
|
||||
})?
|
||||
.iter_data()
|
||||
.map(|d| d.map(|d| d.0))
|
||||
.collect_vec()
|
||||
}
|
||||
TimeUnit::Microsecond => {
|
||||
TimestampMicrosecondVector::try_from_arrow_array(array.clone())
|
||||
.with_context(|_| DatatypesSnafu {
|
||||
extra: format!("Failed to create vector from arrow array {array:?}"),
|
||||
})?
|
||||
.iter_data()
|
||||
.map(|d| d.map(|d| d.0))
|
||||
.collect_vec()
|
||||
}
|
||||
TimeUnit::Nanosecond => {
|
||||
TimestampNanosecondVector::try_from_arrow_array(array.clone())
|
||||
.with_context(|_| DatatypesSnafu {
|
||||
extra: format!("Failed to create vector from arrow array {array:?}"),
|
||||
})?
|
||||
.iter_data()
|
||||
.map(|d| d.map(|d| d.0))
|
||||
.collect_vec()
|
||||
}
|
||||
}
|
||||
}
|
||||
datafusion_expr::ColumnarValue::Scalar(scalar) => {
|
||||
let value = Value::try_from(scalar.clone()).with_context(|_| DatatypesSnafu {
|
||||
extra: format!("Failed to convert scalar {scalar:?} to value"),
|
||||
})?;
|
||||
let ts = value.as_timestamp().context(UnexpectedSnafu {
|
||||
reason: format!("Expect Timestamp, found {:?}", value),
|
||||
})?;
|
||||
vec![Some(ts)]
|
||||
}
|
||||
};
|
||||
Ok(val)
|
||||
}
|
||||
|
||||
/// Convert sql to datafusion logical plan
|
||||
pub async fn sql_to_df_plan(
|
||||
query_ctx: QueryContextRef,
|
||||
engine: QueryEngineRef,
|
||||
sql: &str,
|
||||
optimize: bool,
|
||||
) -> Result<LogicalPlan, Error> {
|
||||
let stmt = QueryLanguageParser::parse_sql(sql, &query_ctx)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(&stmt, query_ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
let plan = if optimize {
|
||||
apply_df_optimizer(plan).await?
|
||||
} else {
|
||||
plan
|
||||
};
|
||||
Ok(plan)
|
||||
}
|
||||
|
||||
/// Return (the column name of the time index column, the time window expr, the expected time unit of the time index column, the expr's schema for evaluating the time window)
|
||||
async fn find_time_window_expr(
|
||||
plan: &LogicalPlan,
|
||||
catalog_man: CatalogManagerRef,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<(String, Option<datafusion_expr::Expr>, TimeUnit, DFSchema), Error> {
|
||||
// TODO(discord9): find the expr that does the time window
|
||||
|
||||
let mut table_name = None;
|
||||
|
||||
// first find the table source in the logical plan
|
||||
plan.apply(|plan| {
|
||||
let LogicalPlan::TableScan(table_scan) = plan else {
|
||||
return Ok(TreeNodeRecursion::Continue);
|
||||
};
|
||||
table_name = Some(table_scan.table_name.clone());
|
||||
Ok(TreeNodeRecursion::Stop)
|
||||
})
|
||||
.with_context(|_| DatafusionSnafu {
|
||||
context: format!("Can't find table source in plan {plan:?}"),
|
||||
})?;
|
||||
let Some(table_name) = table_name else {
|
||||
UnexpectedSnafu {
|
||||
reason: format!("Can't find table source in plan {plan:?}"),
|
||||
}
|
||||
.fail()?
|
||||
};
|
||||
|
||||
let current_schema = query_ctx.current_schema();
|
||||
|
||||
let catalog_name = table_name.catalog().unwrap_or(query_ctx.current_catalog());
|
||||
let schema_name = table_name.schema().unwrap_or(¤t_schema);
|
||||
let table_name = table_name.table();
|
||||
|
||||
let Some(table_ref) = catalog_man
|
||||
.table(catalog_name, schema_name, table_name, Some(&query_ctx))
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?
|
||||
else {
|
||||
UnexpectedSnafu {
|
||||
reason: format!(
|
||||
"Can't find table {table_name:?} in catalog {catalog_name:?}/{schema_name:?}"
|
||||
),
|
||||
}
|
||||
.fail()?
|
||||
};
|
||||
|
||||
let schema = &table_ref.table_info().meta.schema;
|
||||
|
||||
let ts_index = schema.timestamp_column().context(UnexpectedSnafu {
|
||||
reason: format!("Can't find timestamp column in table {table_name:?}"),
|
||||
})?;
|
||||
|
||||
let ts_col_name = ts_index.name.clone();
|
||||
|
||||
let expected_time_unit = ts_index.data_type.as_timestamp().with_context(|| UnexpectedSnafu {
|
||||
reason: format!(
|
||||
"Expected timestamp column {ts_col_name:?} in table {table_name:?} to be timestamp, but got {ts_index:?}"
|
||||
),
|
||||
})?.unit();
|
||||
|
||||
let arrow_schema = Arc::new(arrow_schema::Schema::new(vec![arrow_schema::Field::new(
|
||||
ts_col_name.clone(),
|
||||
ts_index.data_type.as_arrow_type(),
|
||||
false,
|
||||
)]));
|
||||
|
||||
let df_schema = DFSchema::from_field_specific_qualified_schema(
|
||||
vec![Some(TableReference::bare(table_name))],
|
||||
&arrow_schema,
|
||||
)
|
||||
.with_context(|_e| DatafusionSnafu {
|
||||
context: format!("Failed to create DFSchema from arrow schema {arrow_schema:?}"),
|
||||
})?;
|
||||
|
||||
// find the time window expr which refers to the time index column
|
||||
let mut aggr_expr = None;
|
||||
let mut time_window_expr: Option<Expr> = None;
|
||||
|
||||
let find_inner_aggr_expr = |plan: &LogicalPlan| {
|
||||
if let LogicalPlan::Aggregate(aggregate) = plan {
|
||||
aggr_expr = Some(aggregate.clone());
|
||||
};
|
||||
|
||||
Ok(TreeNodeRecursion::Continue)
|
||||
};
|
||||
plan.apply(find_inner_aggr_expr)
|
||||
.with_context(|_| DatafusionSnafu {
|
||||
context: format!("Can't find aggr expr in plan {plan:?}"),
|
||||
})?;
|
||||
|
||||
if let Some(aggregate) = aggr_expr {
|
||||
for group_expr in &aggregate.group_expr {
|
||||
let refs = group_expr.column_refs();
|
||||
if refs.len() != 1 {
|
||||
continue;
|
||||
}
|
||||
let ref_col = refs.iter().next().unwrap();
|
||||
|
||||
let index = aggregate.input.schema().maybe_index_of_column(ref_col);
|
||||
let Some(index) = index else {
|
||||
continue;
|
||||
};
|
||||
let field = aggregate.input.schema().field(index);
|
||||
|
||||
let is_time_index = field.metadata().get(TIME_INDEX_KEY) == Some(&"true".to_string());
|
||||
|
||||
if is_time_index {
|
||||
let rewrite_column = group_expr.clone();
|
||||
let rewritten = rewrite_column
|
||||
.rewrite(&mut RewriteColumn {
|
||||
table_name: table_name.to_string(),
|
||||
})
|
||||
.with_context(|_| DatafusionSnafu {
|
||||
context: format!("Rewrite expr failed, expr={:?}", group_expr),
|
||||
})?
|
||||
.data;
|
||||
struct RewriteColumn {
|
||||
table_name: String,
|
||||
}
|
||||
|
||||
impl TreeNodeRewriter for RewriteColumn {
|
||||
type Node = Expr;
|
||||
fn f_down(&mut self, node: Self::Node) -> DfResult<Transformed<Self::Node>> {
|
||||
let Expr::Column(mut column) = node else {
|
||||
return Ok(Transformed::no(node));
|
||||
};
|
||||
|
||||
column.relation = Some(TableReference::bare(self.table_name.clone()));
|
||||
|
||||
Ok(Transformed::yes(Expr::Column(column)))
|
||||
}
|
||||
}
|
||||
|
||||
time_window_expr = Some(rewritten);
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok((ts_col_name, time_window_expr, expected_time_unit, df_schema))
|
||||
} else {
|
||||
// can't find a time window expr, return None
|
||||
Ok((ts_col_name, None, expected_time_unit, df_schema))
|
||||
}
|
||||
}
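// Example (illustrative, not from the original change): for
//   SELECT date_bin('5 minutes', ts) AS time_window FROM numbers_with_ts GROUP BY time_window
// the function above returns roughly
//   ("ts", Some(date_bin(..., numbers_with_ts.ts)), TimeUnit::Millisecond, <single-column schema of ts>)
// i.e. the name of the time index column, the GROUP BY expr that buckets it, the column's
// expected time unit, and a one-column schema used to evaluate that expr.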
|
||||
|
||||
/// Find the nearest lower bound for time `current` in the given `plan` using the time window expr.
/// i.e. for a time window expr of `date_bin(INTERVAL '5 minutes', ts) as time_window` and `current="2021-07-01 00:01:01.000"`,
/// return `Some("2021-07-01 00:00:00.000")`.
/// If `plan` doesn't contain a `TIME INDEX` column, return `None`.
///
|
||||
/// A time window expr is an expr that:
/// 1. refers only to a time index column
/// 2. is monotonically increasing
/// 3. appears in the GROUP BY clause
///
/// Note: the plan should contain only one `TableScan`.
|
||||
pub async fn find_plan_time_window_bound(
|
||||
plan: &LogicalPlan,
|
||||
current: Timestamp,
|
||||
query_ctx: QueryContextRef,
|
||||
engine: QueryEngineRef,
|
||||
) -> Result<(String, Option<Timestamp>, Option<Timestamp>), Error> {
|
||||
// TODO(discord9): find the expr that does the time window
|
||||
let catalog_man = engine.engine_state().catalog_manager();
|
||||
|
||||
let (ts_col_name, time_window_expr, expected_time_unit, df_schema) =
|
||||
find_time_window_expr(plan, catalog_man.clone(), query_ctx).await?;
|
||||
// cast current to ts_index's type
|
||||
let new_current = current
|
||||
.convert_to(expected_time_unit)
|
||||
.with_context(|| UnexpectedSnafu {
|
||||
reason: format!("Failed to cast current timestamp {current:?} to {expected_time_unit}"),
|
||||
})?;
|
||||
|
||||
// if no time_window_expr is found, return None
|
||||
if let Some(time_window_expr) = time_window_expr {
|
||||
let lower_bound =
|
||||
find_expr_time_window_lower_bound(&time_window_expr, &df_schema, new_current)?;
|
||||
let upper_bound =
|
||||
find_expr_time_window_upper_bound(&time_window_expr, &df_schema, new_current)?;
|
||||
Ok((ts_col_name, lower_bound, upper_bound))
|
||||
} else {
|
||||
Ok((ts_col_name, None, None))
|
||||
}
|
||||
}
|
||||
|
||||
/// Find the lower bound of the time window for the given `expr` and `current` timestamp.
///
/// i.e. for `current="2021-07-01 00:01:01.000"` and `expr=date_bin(INTERVAL '5 minutes', ts) as time_window` and `ts_col=ts`,
/// return `Some("2021-07-01 00:00:00.000")` since it's the lower bound
/// of the current time window given the current timestamp.
///
/// If this returns `None`, the time window has no lower bound.
|
||||
fn find_expr_time_window_lower_bound(
|
||||
expr: &Expr,
|
||||
df_schema: &DFSchema,
|
||||
current: Timestamp,
|
||||
) -> Result<Option<Timestamp>, Error> {
|
||||
let phy_planner = DefaultPhysicalPlanner::default();
|
||||
|
||||
let phy_expr: PhysicalExprRef = phy_planner
|
||||
.create_physical_expr(expr, df_schema, &SessionContext::new().state())
|
||||
.with_context(|_e| DatafusionSnafu {
|
||||
context: format!(
|
||||
"Failed to create physical expression from {expr:?} using {df_schema:?}"
|
||||
),
|
||||
})?;
|
||||
|
||||
let cur_time_window = eval_ts_to_ts(&phy_expr, df_schema, current)?;
|
||||
let input_time_unit = cur_time_window.unit();
|
||||
Ok(cur_time_window.convert_to(input_time_unit))
|
||||
}
|
||||
|
||||
/// Find the upper bound of the current time window for the given time window expression.
|
||||
fn find_expr_time_window_upper_bound(
|
||||
expr: &Expr,
|
||||
df_schema: &DFSchema,
|
||||
current: Timestamp,
|
||||
) -> Result<Option<Timestamp>, Error> {
|
||||
use std::cmp::Ordering;
|
||||
|
||||
let phy_planner = DefaultPhysicalPlanner::default();
|
||||
|
||||
let phy_expr: PhysicalExprRef = phy_planner
|
||||
.create_physical_expr(expr, df_schema, &SessionContext::new().state())
|
||||
.with_context(|_e| DatafusionSnafu {
|
||||
context: format!(
|
||||
"Failed to create physical expression from {expr:?} using {df_schema:?}"
|
||||
),
|
||||
})?;
|
||||
|
||||
let cur_time_window = eval_ts_to_ts(&phy_expr, df_schema, current)?;
|
||||
|
||||
// probe forward to bracket the upper bound of the current time window
|
||||
let mut offset: i64 = 1;
|
||||
let mut lower_bound = Some(current);
|
||||
let upper_bound;
|
||||
// first, use an exponential probe to find a range for the binary search
|
||||
loop {
|
||||
let Some(next_val) = current.value().checked_add(offset) else {
|
||||
// no upper bound if overflow
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let next_time_probe = common_time::Timestamp::new(next_val, current.unit());
|
||||
|
||||
let next_time_window = eval_ts_to_ts(&phy_expr, df_schema, next_time_probe)?;
|
||||
|
||||
match next_time_window.cmp(&cur_time_window) {
|
||||
Ordering::Less => {UnexpectedSnafu {
|
||||
reason: format!(
|
||||
"Unsupported time window expression, expect monotonic increasing for time window expression {expr:?}"
|
||||
),
|
||||
}
|
||||
.fail()?
|
||||
}
|
||||
Ordering::Equal => {
|
||||
lower_bound = Some(next_time_probe);
|
||||
}
|
||||
Ordering::Greater => {
|
||||
upper_bound = Some(next_time_probe);
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
let Some(new_offset) = offset.checked_mul(2) else {
|
||||
// no upper bound if overflow
|
||||
return Ok(None);
|
||||
};
|
||||
offset = new_offset;
|
||||
}
|
||||
|
||||
// binary search for the exact upper bound
|
||||
|
||||
ensure!(
    lower_bound.map(|v| v.unit()) == upper_bound.map(|v| v.unit()),
    UnexpectedSnafu {
        reason: format!(
            "Unit mismatch for time window expression {expr:?}, found {lower_bound:?} and {upper_bound:?}"
        ),
    }
);
|
||||
|
||||
let output_unit = upper_bound
|
||||
.context(UnexpectedSnafu {
|
||||
reason: "should have lower bound",
|
||||
})?
|
||||
.unit();
|
||||
|
||||
let mut low = lower_bound
|
||||
.context(UnexpectedSnafu {
|
||||
reason: "should have lower bound",
|
||||
})?
|
||||
.value();
|
||||
let mut high = upper_bound
|
||||
.context(UnexpectedSnafu {
|
||||
reason: "should have upper bound",
|
||||
})?
|
||||
.value();
|
||||
while low < high {
|
||||
let mid = (low + high) / 2;
|
||||
let mid_probe = common_time::Timestamp::new(mid, output_unit);
|
||||
let mid_time_window = eval_ts_to_ts(&phy_expr, df_schema, mid_probe)?;
|
||||
|
||||
match mid_time_window.cmp(&cur_time_window) {
|
||||
Ordering::Less => UnexpectedSnafu {
|
||||
reason: format!("Binary search failed for time window expression {expr:?}"),
|
||||
}
|
||||
.fail()?,
|
||||
Ordering::Equal => low = mid + 1,
|
||||
Ordering::Greater => high = mid,
|
||||
}
|
||||
}
|
||||
|
||||
let final_upper_bound_for_time_window = common_time::Timestamp::new(high, output_unit);
|
||||
|
||||
Ok(Some(final_upper_bound_for_time_window))
|
||||
}
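// --- Illustrative sketch (not part of the original change) ---
// The search above follows a generic "exponential probe, then binary search" pattern.
// A simplified, self-contained version over plain integers, assuming a monotonically
// non-decreasing step function `bucket` (the analogue of the time window expr): it returns
// the smallest point after `current` that falls into a later bucket, or `None` on overflow.
fn upper_bound_of_bucket(bucket: impl Fn(i64) -> i64, current: i64) -> Option<i64> {
    let cur = bucket(current);

    // exponential probe: find some point that maps to a later bucket
    let mut offset: i64 = 1;
    let mut low = current;
    let mut high = loop {
        let probe = current.checked_add(offset)?;
        if bucket(probe) > cur {
            break probe;
        }
        low = probe;
        offset = offset.checked_mul(2)?;
    };

    // binary search for the first point whose bucket differs from `cur`
    while low < high {
        let mid = low + (high - low) / 2;
        if bucket(mid) > cur {
            high = mid;
        } else {
            low = mid + 1;
        }
    }
    Some(high)
}
// e.g. with `bucket = |t| t / 300` (5-minute buckets over seconds) and `current = 61`,
// `upper_bound_of_bucket` returns `Some(300)`, the start of the next bucket.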
|
||||
|
||||
fn eval_ts_to_ts(
|
||||
phy: &PhysicalExprRef,
|
||||
df_schema: &DFSchema,
|
||||
input_value: Timestamp,
|
||||
) -> Result<Timestamp, Error> {
|
||||
let schema_ty = df_schema.field(0).data_type();
|
||||
let schema_cdt = ConcreteDataType::from_arrow_type(schema_ty);
|
||||
let schema_unit = if let ConcreteDataType::Timestamp(ts) = schema_cdt {
|
||||
ts.unit()
|
||||
} else {
|
||||
return UnexpectedSnafu {
|
||||
reason: format!("Expect Timestamp, found {:?}", schema_cdt),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
let input_value = input_value
|
||||
.convert_to(schema_unit)
|
||||
.with_context(|| UnexpectedSnafu {
|
||||
reason: format!("Failed to convert timestamp {input_value:?} to {schema_unit}"),
|
||||
})?;
|
||||
let ts_vector = match schema_unit {
|
||||
TimeUnit::Second => {
|
||||
TimestampSecondVector::from_vec(vec![input_value.value()]).to_arrow_array()
|
||||
}
|
||||
TimeUnit::Millisecond => {
|
||||
TimestampMillisecondVector::from_vec(vec![input_value.value()]).to_arrow_array()
|
||||
}
|
||||
TimeUnit::Microsecond => {
|
||||
TimestampMicrosecondVector::from_vec(vec![input_value.value()]).to_arrow_array()
|
||||
}
|
||||
TimeUnit::Nanosecond => {
|
||||
TimestampNanosecondVector::from_vec(vec![input_value.value()]).to_arrow_array()
|
||||
}
|
||||
};
|
||||
|
||||
let rb = DfRecordBatch::try_new(df_schema.inner().clone(), vec![ts_vector.clone()])
|
||||
.with_context(|_| ArrowSnafu {
|
||||
context: format!("Failed to create record batch from {df_schema:?} and {ts_vector:?}"),
|
||||
})?;
|
||||
|
||||
let eval_res = phy.evaluate(&rb).with_context(|_| DatafusionSnafu {
|
||||
context: format!("Failed to evaluate physical expression {phy:?} on {rb:?}"),
|
||||
})?;
|
||||
|
||||
if let Some(Some(ts)) = columnar_to_ts_vector(&eval_res)?.first() {
|
||||
Ok(*ts)
|
||||
} else {
|
||||
UnexpectedSnafu {
|
||||
reason: format!(
|
||||
"Expected timestamp in expression {phy:?} but got {:?}",
|
||||
eval_res
|
||||
),
|
||||
}
|
||||
.fail()?
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(discord9): a method to find out the precise time window
|
||||
|
||||
/// Find the `Filter` node corresponding to the outermost `WHERE` and add a new filter expr to it
|
||||
#[derive(Debug)]
|
||||
pub struct AddFilterRewriter {
|
||||
extra_filter: Expr,
|
||||
is_rewritten: bool,
|
||||
}
|
||||
|
||||
impl AddFilterRewriter {
|
||||
fn new(filter: Expr) -> Self {
|
||||
Self {
|
||||
extra_filter: filter,
|
||||
is_rewritten: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TreeNodeRewriter for AddFilterRewriter {
|
||||
type Node = LogicalPlan;
|
||||
fn f_up(&mut self, node: Self::Node) -> DfResult<Transformed<Self::Node>> {
|
||||
if self.is_rewritten {
|
||||
return Ok(Transformed::no(node));
|
||||
}
|
||||
match node {
|
||||
LogicalPlan::Filter(mut filter) if !filter.having => {
|
||||
filter.predicate = filter.predicate.and(self.extra_filter.clone());
|
||||
self.is_rewritten = true;
|
||||
Ok(Transformed::yes(LogicalPlan::Filter(filter)))
|
||||
}
|
||||
LogicalPlan::TableScan(_) => {
|
||||
// add a new filter
|
||||
let filter =
|
||||
datafusion_expr::Filter::try_new(self.extra_filter.clone(), Arc::new(node))?;
|
||||
self.is_rewritten = true;
|
||||
Ok(Transformed::yes(LogicalPlan::Filter(filter)))
|
||||
}
|
||||
_ => Ok(Transformed::no(node)),
|
||||
}
|
||||
}
|
||||
}
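// --- Illustrative sketch (not part of the original change) ---
// Typical usage of `AddFilterRewriter`, mirroring the tests below: push an extra predicate
// into an existing plan and unparse the result back to SQL. `plan` is assumed to come from
// `sql_to_df_plan(..., optimize = false)`, and the `number > 4` predicate is arbitrary.
fn example_add_filter(plan: LogicalPlan) -> Result<String, Error> {
    use datafusion_expr::{col, lit};

    let mut rewriter = AddFilterRewriter::new(col("number").gt(lit(4u32)));
    let rewritten = plan
        .rewrite(&mut rewriter)
        .with_context(|_| DatafusionSnafu {
            context: "Failed to rewrite plan with the extra filter".to_string(),
        })?
        .data;
    df_plan_to_sql(&rewritten)
}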
|
||||
|
||||
fn df_plan_to_sql(plan: &LogicalPlan) -> Result<String, Error> {
|
||||
/// A dialect that quotes identifiers containing uppercase characters
|
||||
struct ForceQuoteIdentifiers;
|
||||
impl datafusion::sql::unparser::dialect::Dialect for ForceQuoteIdentifiers {
|
||||
fn identifier_quote_style(&self, identifier: &str) -> Option<char> {
|
||||
if identifier.to_lowercase() != identifier {
|
||||
Some('"')
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
let unparser = Unparser::new(&ForceQuoteIdentifiers);
|
||||
// first make all columns qualified
|
||||
let sql = unparser
|
||||
.plan_to_sql(plan)
|
||||
.with_context(|_e| DatafusionSnafu {
|
||||
context: format!("Failed to unparse logical plan {plan:?}"),
|
||||
})?;
|
||||
Ok(sql.to_string())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use datafusion_common::tree_node::TreeNode;
|
||||
use pretty_assertions::assert_eq;
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::{sql_to_df_plan, *};
|
||||
use crate::recording_rules::{df_plan_to_sql, AddFilterRewriter};
|
||||
use crate::test_utils::create_test_query_engine;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_sql_plan_convert() {
|
||||
let query_engine = create_test_query_engine();
|
||||
let ctx = QueryContext::arc();
|
||||
let old = r#"SELECT "NUMBER" FROM "UPPERCASE_NUMBERS_WITH_TS""#;
|
||||
let new = sql_to_df_plan(ctx.clone(), query_engine.clone(), old, false)
|
||||
.await
|
||||
.unwrap();
|
||||
let new_sql = df_plan_to_sql(&new).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
r#"SELECT "UPPERCASE_NUMBERS_WITH_TS"."NUMBER" FROM "UPPERCASE_NUMBERS_WITH_TS""#,
|
||||
new_sql
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_add_filter() {
|
||||
let testcases = vec![
|
||||
(
|
||||
"SELECT number FROM numbers_with_ts GROUP BY number","SELECT numbers_with_ts.number FROM numbers_with_ts WHERE (number > 4) GROUP BY numbers_with_ts.number"
|
||||
),
|
||||
(
|
||||
"SELECT number FROM numbers_with_ts WHERE number < 2 OR number >10",
|
||||
"SELECT numbers_with_ts.number FROM numbers_with_ts WHERE ((numbers_with_ts.number < 2) OR (numbers_with_ts.number > 10)) AND (number > 4)"
|
||||
),
|
||||
(
|
||||
"SELECT date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window",
|
||||
"SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE (number > 4) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)"
|
||||
)
|
||||
];
|
||||
use datafusion_expr::{col, lit};
|
||||
let query_engine = create_test_query_engine();
|
||||
let ctx = QueryContext::arc();
|
||||
|
||||
for (before, after) in testcases {
|
||||
let sql = before;
|
||||
let plan = sql_to_df_plan(ctx.clone(), query_engine.clone(), sql, false)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut add_filter = AddFilterRewriter::new(col("number").gt(lit(4u32)));
|
||||
let plan = plan.rewrite(&mut add_filter).unwrap().data;
|
||||
let new_sql = df_plan_to_sql(&plan).unwrap();
|
||||
assert_eq!(after, new_sql);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_plan_time_window_lower_bound() {
|
||||
use datafusion_expr::{col, lit};
|
||||
let query_engine = create_test_query_engine();
|
||||
let ctx = QueryContext::arc();
|
||||
|
||||
let testcases = [
|
||||
// the same alias is not the same column
|
||||
(
|
||||
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS ts FROM numbers_with_ts GROUP BY ts;",
|
||||
Timestamp::new(1740394109, TimeUnit::Second),
|
||||
(
|
||||
"ts".to_string(),
|
||||
Some(Timestamp::new(1740394109000, TimeUnit::Millisecond)),
|
||||
Some(Timestamp::new(1740394109001, TimeUnit::Millisecond)),
|
||||
),
|
||||
r#"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS ts FROM numbers_with_ts WHERE ((ts >= CAST('2025-02-24 10:48:29' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:48:29.001' AS TIMESTAMP))) GROUP BY numbers_with_ts.ts"#
|
||||
),
|
||||
// complex time window index
|
||||
(
|
||||
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts GROUP BY time_window;",
|
||||
Timestamp::new(1740394109, TimeUnit::Second),
|
||||
(
|
||||
"ts".to_string(),
|
||||
Some(Timestamp::new(1740394080, TimeUnit::Second)),
|
||||
Some(Timestamp::new(1740394140, TimeUnit::Second)),
|
||||
),
|
||||
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
|
||||
),
|
||||
// no time index
|
||||
(
|
||||
"SELECT date_bin('5 minutes', ts) FROM numbers_with_ts;",
|
||||
Timestamp::new(23, TimeUnit::Millisecond),
|
||||
("ts".to_string(), None, None),
|
||||
"SELECT date_bin('5 minutes', ts) FROM numbers_with_ts;"
|
||||
),
|
||||
// time index
|
||||
(
|
||||
"SELECT date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window;",
|
||||
Timestamp::new(23, TimeUnit::Nanosecond),
|
||||
(
|
||||
"ts".to_string(),
|
||||
Some(Timestamp::new(0, TimeUnit::Millisecond)),
|
||||
Some(Timestamp::new(300000, TimeUnit::Millisecond)),
|
||||
),
|
||||
"SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)"
|
||||
),
|
||||
// on spot
|
||||
(
|
||||
"SELECT date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window;",
|
||||
Timestamp::new(0, TimeUnit::Nanosecond),
|
||||
(
|
||||
"ts".to_string(),
|
||||
Some(Timestamp::new(0, TimeUnit::Millisecond)),
|
||||
Some(Timestamp::new(300000, TimeUnit::Millisecond)),
|
||||
),
|
||||
"SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)"
|
||||
),
|
||||
// different time unit
|
||||
(
|
||||
"SELECT date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window;",
|
||||
Timestamp::new(23_000_000, TimeUnit::Nanosecond),
|
||||
(
|
||||
"ts".to_string(),
|
||||
Some(Timestamp::new(0, TimeUnit::Millisecond)),
|
||||
Some(Timestamp::new(300000, TimeUnit::Millisecond)),
|
||||
),
|
||||
"SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)"
|
||||
),
|
||||
// time index with other fields
|
||||
(
|
||||
"SELECT sum(number) as sum_up, date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window;",
|
||||
Timestamp::new(23, TimeUnit::Millisecond),
|
||||
(
|
||||
"ts".to_string(),
|
||||
Some(Timestamp::new(0, TimeUnit::Millisecond)),
|
||||
Some(Timestamp::new(300000, TimeUnit::Millisecond)),
|
||||
),
|
||||
"SELECT sum(numbers_with_ts.number) AS sum_up, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)"
|
||||
),
|
||||
// time index with other pks
|
||||
(
|
||||
"SELECT number, date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window, number;",
|
||||
Timestamp::new(23, TimeUnit::Millisecond),
|
||||
(
|
||||
"ts".to_string(),
|
||||
Some(Timestamp::new(0, TimeUnit::Millisecond)),
|
||||
Some(Timestamp::new(300000, TimeUnit::Millisecond)),
|
||||
),
|
||||
"SELECT numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts), numbers_with_ts.number"
|
||||
),
|
||||
// subquery
|
||||
(
|
||||
"SELECT number, time_window FROM (SELECT number, date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window, number);",
|
||||
Timestamp::new(23, TimeUnit::Millisecond),
|
||||
(
|
||||
"ts".to_string(),
|
||||
Some(Timestamp::new(0, TimeUnit::Millisecond)),
|
||||
Some(Timestamp::new(300000, TimeUnit::Millisecond)),
|
||||
),
|
||||
"SELECT numbers_with_ts.number, time_window FROM (SELECT numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts), numbers_with_ts.number)"
|
||||
),
|
||||
// cte
|
||||
(
|
||||
"with cte as (select number, date_bin('5 minutes', ts) as time_window from numbers_with_ts GROUP BY time_window, number) select number, time_window from cte;",
|
||||
Timestamp::new(23, TimeUnit::Millisecond),
|
||||
(
|
||||
"ts".to_string(),
|
||||
Some(Timestamp::new(0, TimeUnit::Millisecond)),
|
||||
Some(Timestamp::new(300000, TimeUnit::Millisecond)),
|
||||
),
|
||||
"SELECT cte.number, cte.time_window FROM (SELECT numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts), numbers_with_ts.number) AS cte"
|
||||
),
|
||||
// complex subquery without alias
|
||||
(
|
||||
"SELECT sum(number), number, date_bin('5 minutes', ts) as time_window, bucket_name FROM (SELECT number, ts, case when number < 5 THEN 'bucket_0_5' when number >= 5 THEN 'bucket_5_inf' END as bucket_name FROM numbers_with_ts) GROUP BY number, time_window, bucket_name;",
|
||||
Timestamp::new(23, TimeUnit::Millisecond),
|
||||
(
|
||||
"ts".to_string(),
|
||||
Some(Timestamp::new(0, TimeUnit::Millisecond)),
|
||||
Some(Timestamp::new(300000, TimeUnit::Millisecond)),
|
||||
),
|
||||
"SELECT sum(numbers_with_ts.number), numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window, bucket_name FROM (SELECT numbers_with_ts.number, numbers_with_ts.ts, CASE WHEN (numbers_with_ts.number < 5) THEN 'bucket_0_5' WHEN (numbers_with_ts.number >= 5) THEN 'bucket_5_inf' END AS bucket_name FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP)))) GROUP BY numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts), bucket_name"
|
||||
),
|
||||
// complex subquery alias
|
||||
(
|
||||
"SELECT sum(number), number, date_bin('5 minutes', ts) as time_window, bucket_name FROM (SELECT number, ts, case when number < 5 THEN 'bucket_0_5' when number >= 5 THEN 'bucket_5_inf' END as bucket_name FROM numbers_with_ts) as cte GROUP BY number, time_window, bucket_name;",
|
||||
Timestamp::new(23, TimeUnit::Millisecond),
|
||||
(
|
||||
"ts".to_string(),
|
||||
Some(Timestamp::new(0, TimeUnit::Millisecond)),
|
||||
Some(Timestamp::new(300000, TimeUnit::Millisecond)),
|
||||
),
|
||||
"SELECT sum(cte.number), cte.number, date_bin('5 minutes', cte.ts) AS time_window, cte.bucket_name FROM (SELECT numbers_with_ts.number, numbers_with_ts.ts, CASE WHEN (numbers_with_ts.number < 5) THEN 'bucket_0_5' WHEN (numbers_with_ts.number >= 5) THEN 'bucket_5_inf' END AS bucket_name FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP)))) AS cte GROUP BY cte.number, date_bin('5 minutes', cte.ts), cte.bucket_name"
|
||||
),
|
||||
];
|
||||
|
||||
for (sql, current, expected, expected_unparsed) in testcases {
|
||||
let plan = sql_to_df_plan(ctx.clone(), query_engine.clone(), sql, true)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let real =
|
||||
find_plan_time_window_bound(&plan, current, ctx.clone(), query_engine.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(expected, real);
|
||||
|
||||
let plan = sql_to_df_plan(ctx.clone(), query_engine.clone(), sql, false)
|
||||
.await
|
||||
.unwrap();
|
||||
let (col_name, lower, upper) = real;
|
||||
let new_sql = if lower.is_some() {
|
||||
let to_df_literal = |value| {
|
||||
let value = Value::from(value);
|
||||
|
||||
value.try_to_scalar_value(&value.data_type()).unwrap()
|
||||
};
|
||||
let lower = to_df_literal(lower.unwrap());
|
||||
let upper = to_df_literal(upper.unwrap());
|
||||
let expr = col(&col_name)
|
||||
.gt_eq(lit(lower))
|
||||
.and(col(&col_name).lt_eq(lit(upper)));
|
||||
let mut add_filter = AddFilterRewriter::new(expr);
|
||||
let plan = plan.rewrite(&mut add_filter).unwrap().data;
|
||||
df_plan_to_sql(&plan).unwrap()
|
||||
} else {
|
||||
sql.to_string()
|
||||
};
|
||||
assert_eq!(expected_unparsed, new_sql);
|
||||
}
|
||||
}
|
||||
}
|
||||
815
src/flow/src/recording_rules/engine.rs
Normal file
@@ -0,0 +1,815 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use api::v1::flow::FlowResponse;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::ddl::create_flow::FlowType;
|
||||
use common_meta::key::flow::FlowMetadataManagerRef;
|
||||
use common_meta::key::table_info::TableInfoManager;
|
||||
use common_meta::key::TableMetadataManagerRef;
|
||||
use common_telemetry::tracing::warn;
|
||||
use common_telemetry::{debug, info};
|
||||
use common_time::Timestamp;
|
||||
use datafusion::sql::unparser::expr_to_sql;
|
||||
use datafusion_common::tree_node::TreeNode;
|
||||
use datatypes::value::Value;
|
||||
use query::QueryEngineRef;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::storage::RegionId;
|
||||
use table::metadata::TableId;
|
||||
use tokio::sync::oneshot::error::TryRecvError;
|
||||
use tokio::sync::{oneshot, RwLock};
|
||||
use tokio::time::Instant;
|
||||
|
||||
use super::frontend_client::FrontendClient;
|
||||
use super::{df_plan_to_sql, AddFilterRewriter, TimeWindowExpr};
|
||||
use crate::adapter::{CreateFlowArgs, FlowId, TableName};
|
||||
use crate::error::{
|
||||
DatafusionSnafu, DatatypesSnafu, ExternalSnafu, FlowAlreadyExistSnafu, InternalSnafu,
|
||||
TimeSnafu, UnexpectedSnafu,
|
||||
};
|
||||
use crate::metrics::{METRIC_FLOW_RULE_ENGINE_QUERY_TIME, METRIC_FLOW_RULE_ENGINE_SLOW_QUERY};
|
||||
use crate::recording_rules::{find_time_window_expr, sql_to_df_plan};
|
||||
use crate::Error;
|
||||
|
||||
/// TODO(discord9): make those constants configurable
|
||||
/// The default rule engine query timeout is 10 minutes
|
||||
pub const DEFAULT_RULE_ENGINE_QUERY_TIMEOUT: Duration = Duration::from_secs(10 * 60);
|
||||
|
||||
/// Output a warn log for any query that runs for more than 1 minute, and again every minute while the query is still running
|
||||
pub const SLOW_QUERY_THRESHOLD: Duration = Duration::from_secs(60);
|
||||
|
||||
/// TODO(discord9): determine how to configure refresh rate
|
||||
pub struct RecordingRuleEngine {
|
||||
tasks: RwLock<BTreeMap<FlowId, RecordingRuleTask>>,
|
||||
shutdown_txs: RwLock<BTreeMap<FlowId, oneshot::Sender<()>>>,
|
||||
frontend_client: Arc<FrontendClient>,
|
||||
flow_metadata_manager: FlowMetadataManagerRef,
|
||||
table_meta: TableMetadataManagerRef,
|
||||
engine: QueryEngineRef,
|
||||
}
|
||||
|
||||
impl RecordingRuleEngine {
|
||||
pub fn new(
|
||||
frontend_client: Arc<FrontendClient>,
|
||||
engine: QueryEngineRef,
|
||||
flow_metadata_manager: FlowMetadataManagerRef,
|
||||
table_meta: TableMetadataManagerRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
tasks: Default::default(),
|
||||
shutdown_txs: Default::default(),
|
||||
frontend_client,
|
||||
flow_metadata_manager,
|
||||
table_meta,
|
||||
engine,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle_inserts(
|
||||
&self,
|
||||
request: api::v1::region::InsertRequests,
|
||||
) -> Result<FlowResponse, Error> {
|
||||
let table_info_mgr = self.table_meta.table_info_manager();
|
||||
let mut group_by_table_name: HashMap<TableName, Vec<api::v1::Rows>> = HashMap::new();
|
||||
for r in request.requests {
|
||||
let tid = RegionId::from(r.region_id).table_id();
|
||||
let name = get_table_name(table_info_mgr, &tid).await?;
|
||||
let entry = group_by_table_name.entry(name).or_default();
|
||||
if let Some(rows) = r.rows {
|
||||
entry.push(rows);
|
||||
}
|
||||
}
|
||||
|
||||
for (_flow_id, task) in self.tasks.read().await.iter() {
|
||||
let src_table_names = &task.source_table_names;
|
||||
|
||||
for src_table_name in src_table_names {
|
||||
if let Some(entry) = group_by_table_name.get(src_table_name) {
|
||||
let Some(expr) = &task.time_window_expr else {
|
||||
continue;
|
||||
};
|
||||
let involved_time_windows = expr.handle_rows(entry.clone()).await?;
|
||||
let mut state = task.state.write().await;
|
||||
state
|
||||
.dirty_time_windows
|
||||
.add_lower_bounds(involved_time_windows.into_iter());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Default::default())
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_table_name(zelf: &TableInfoManager, table_id: &TableId) -> Result<TableName, Error> {
|
||||
zelf.get(*table_id)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?
|
||||
.with_context(|| UnexpectedSnafu {
|
||||
reason: format!("Table id = {:?}, couldn't found table name", table_id),
|
||||
})
|
||||
.map(|name| name.table_name())
|
||||
.map(|name| [name.catalog_name, name.schema_name, name.table_name])
|
||||
}
|
||||
|
||||
const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);
|
||||
|
||||
impl RecordingRuleEngine {
|
||||
pub async fn create_flow(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
|
||||
let CreateFlowArgs {
|
||||
flow_id,
|
||||
sink_table_name,
|
||||
source_table_ids,
|
||||
create_if_not_exists,
|
||||
or_replace,
|
||||
expire_after,
|
||||
comment: _,
|
||||
sql,
|
||||
flow_options,
|
||||
query_ctx,
|
||||
} = args;
|
||||
|
||||
// or replace logic
|
||||
{
|
||||
let is_exist = self.tasks.read().await.contains_key(&flow_id);
|
||||
match (create_if_not_exists, or_replace, is_exist) {
|
||||
// if replace, ignore that old flow exists
|
||||
(_, true, true) => {
|
||||
info!("Replacing flow with id={}", flow_id);
|
||||
}
|
||||
(false, false, true) => FlowAlreadyExistSnafu { id: flow_id }.fail()?,
|
||||
// already exists, and not replace, return None
|
||||
(true, false, true) => {
|
||||
info!("Flow with id={} already exists, do nothing", flow_id);
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// continue as normal
|
||||
(_, _, false) => (),
|
||||
}
|
||||
}
|
||||
|
||||
let flow_type = flow_options.get(FlowType::FLOW_TYPE_KEY);
|
||||
|
||||
ensure!(
|
||||
flow_type == Some(&FlowType::RecordingRule.to_string()) || flow_type.is_none(),
|
||||
UnexpectedSnafu {
|
||||
reason: format!("Flow type is not RecordingRule nor None, got {flow_type:?}")
|
||||
}
|
||||
);
|
||||
|
||||
let Some(query_ctx) = query_ctx else {
|
||||
UnexpectedSnafu {
|
||||
reason: "Query context is None".to_string(),
|
||||
}
|
||||
.fail()?
|
||||
};
|
||||
let query_ctx = Arc::new(query_ctx);
|
||||
let mut source_table_names = Vec::new();
|
||||
for src_id in source_table_ids {
|
||||
let table_name = self
|
||||
.table_meta
|
||||
.table_info_manager()
|
||||
.get(src_id)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?
|
||||
.with_context(|| UnexpectedSnafu {
|
||||
reason: format!("Table id = {:?}, couldn't found table name", src_id),
|
||||
})
|
||||
.map(|name| name.table_name())
|
||||
.map(|name| [name.catalog_name, name.schema_name, name.table_name])?;
|
||||
source_table_names.push(table_name);
|
||||
}
|
||||
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
let plan = sql_to_df_plan(query_ctx.clone(), self.engine.clone(), &sql, true).await?;
|
||||
let (column_name, time_window_expr, _, df_schema) = find_time_window_expr(
|
||||
&plan,
|
||||
self.engine.engine_state().catalog_manager().clone(),
|
||||
query_ctx.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let phy_expr = time_window_expr
|
||||
.map(|expr| TimeWindowExpr::from_expr(&expr, &column_name, &df_schema))
|
||||
.transpose()?;
|
||||
|
||||
info!("Flow id={}, found time window expr={:?}", flow_id, phy_expr);
|
||||
|
||||
let task = RecordingRuleTask::new(
|
||||
flow_id,
|
||||
&sql,
|
||||
phy_expr,
|
||||
expire_after,
|
||||
sink_table_name,
|
||||
source_table_names,
|
||||
query_ctx,
|
||||
rx,
|
||||
);
|
||||
|
||||
let task_inner = task.clone();
|
||||
let engine = self.engine.clone();
|
||||
let frontend = self.frontend_client.clone();
|
||||
|
||||
// TODO(discord9): also save the handle & consider a timer wheel or similar for better scheduling
|
||||
let _handle = common_runtime::spawn_global(async move {
|
||||
match task_inner.start_executing(engine, frontend).await {
|
||||
Ok(()) => info!("Flow {} shutdown", task_inner.flow_id),
|
||||
Err(err) => common_telemetry::error!(
|
||||
"Flow {} encounter unrecoverable error: {err:?}",
|
||||
task_inner.flow_id
|
||||
),
|
||||
}
|
||||
});
|
||||
|
||||
// TODO(discord9): deal with replace logic
|
||||
let replaced_old_task_opt = self.tasks.write().await.insert(flow_id, task);
|
||||
drop(replaced_old_task_opt);
|
||||
|
||||
self.shutdown_txs.write().await.insert(flow_id, tx);
|
||||
|
||||
Ok(Some(flow_id))
|
||||
}
|
||||
|
||||
pub async fn remove_flow(&self, flow_id: FlowId) -> Result<(), Error> {
|
||||
if self.tasks.write().await.remove(&flow_id).is_none() {
|
||||
warn!("Flow {flow_id} not found in tasks")
|
||||
}
|
||||
let Some(tx) = self.shutdown_txs.write().await.remove(&flow_id) else {
|
||||
UnexpectedSnafu {
|
||||
reason: format!("Can't found shutdown tx for flow {flow_id}"),
|
||||
}
|
||||
.fail()?
|
||||
};
|
||||
if tx.send(()).is_err() {
|
||||
warn!("Fail to shutdown flow {flow_id} due to receiver already dropped, maybe flow {flow_id} is already dropped?")
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RecordingRuleTask {
|
||||
pub flow_id: FlowId,
|
||||
query: String,
|
||||
pub time_window_expr: Option<TimeWindowExpr>,
|
||||
/// in seconds
|
||||
pub expire_after: Option<i64>,
|
||||
sink_table_name: [String; 3],
|
||||
source_table_names: HashSet<[String; 3]>,
|
||||
state: Arc<RwLock<RecordingRuleState>>,
|
||||
}
|
||||
|
||||
impl RecordingRuleTask {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
flow_id: FlowId,
|
||||
query: &str,
|
||||
time_window_expr: Option<TimeWindowExpr>,
|
||||
expire_after: Option<i64>,
|
||||
sink_table_name: [String; 3],
|
||||
source_table_names: Vec<[String; 3]>,
|
||||
query_ctx: QueryContextRef,
|
||||
shutdown_rx: oneshot::Receiver<()>,
|
||||
) -> Self {
|
||||
Self {
|
||||
flow_id,
|
||||
query: query.to_string(),
|
||||
time_window_expr,
|
||||
expire_after,
|
||||
sink_table_name,
|
||||
source_table_names: source_table_names.into_iter().collect(),
|
||||
state: Arc::new(RwLock::new(RecordingRuleState::new(query_ctx, shutdown_rx))),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl RecordingRuleTask {
|
||||
/// This should be called in a new tokio task
|
||||
pub async fn start_executing(
|
||||
&self,
|
||||
engine: QueryEngineRef,
|
||||
frontend_client: Arc<FrontendClient>,
|
||||
) -> Result<(), Error> {
|
||||
// only the first query doesn't need an upper bound
|
||||
let mut is_first = true;
|
||||
|
||||
loop {
|
||||
// FIXME(discord9): test whether the case that needs an upper bound also works
|
||||
let new_query = self.gen_query_with_time_window(engine.clone()).await?;
|
||||
|
||||
let insert_into = if let Some(new_query) = new_query {
|
||||
format!(
|
||||
"INSERT INTO {}.{}.{} {}",
|
||||
self.sink_table_name[0],
|
||||
self.sink_table_name[1],
|
||||
self.sink_table_name[2],
|
||||
new_query
|
||||
)
|
||||
} else {
|
||||
tokio::time::sleep(MIN_REFRESH_DURATION).await;
|
||||
continue;
|
||||
};
|
||||
|
||||
if is_first {
|
||||
is_first = false;
|
||||
}
|
||||
|
||||
let instant = Instant::now();
|
||||
let flow_id = self.flow_id;
|
||||
let db_client = frontend_client.get_database_client().await?;
|
||||
let peer_addr = db_client.peer.addr;
|
||||
debug!(
|
||||
"Executing flow {flow_id}(expire_after={:?} secs) on {:?} with query {}",
|
||||
self.expire_after, peer_addr, &insert_into
|
||||
);
|
||||
|
||||
let timer = METRIC_FLOW_RULE_ENGINE_QUERY_TIME
|
||||
.with_label_values(&[flow_id.to_string().as_str()])
|
||||
.start_timer();
|
||||
|
||||
let res = db_client.database.sql(&insert_into).await;
|
||||
drop(timer);
|
||||
|
||||
let elapsed = instant.elapsed();
|
||||
if let Ok(res1) = &res {
|
||||
debug!(
|
||||
"Flow {flow_id} executed, result: {res1:?}, elapsed: {:?}",
|
||||
elapsed
|
||||
);
|
||||
} else if let Err(res) = &res {
|
||||
warn!(
|
||||
"Failed to execute Flow {flow_id} on frontend {}, result: {res:?}, elapsed: {:?} with query: {}",
|
||||
peer_addr, elapsed, &insert_into
|
||||
);
|
||||
}
|
||||
|
||||
// record slow query
|
||||
if elapsed >= SLOW_QUERY_THRESHOLD {
|
||||
warn!(
|
||||
"Flow {flow_id} on frontend {} executed for {:?} before complete, query: {}",
|
||||
peer_addr, elapsed, &insert_into
|
||||
);
|
||||
METRIC_FLOW_RULE_ENGINE_SLOW_QUERY
|
||||
.with_label_values(&[flow_id.to_string().as_str(), &insert_into, &peer_addr])
|
||||
.observe(elapsed.as_secs_f64());
|
||||
}
|
||||
|
||||
self.state
|
||||
.write()
|
||||
.await
|
||||
.after_query_exec(elapsed, res.is_ok());
|
||||
// drop the result to free client-related resources
|
||||
drop(res);
|
||||
|
||||
let sleep_until = {
|
||||
let mut state = self.state.write().await;
|
||||
match state.shutdown_rx.try_recv() {
|
||||
Ok(()) => break Ok(()),
|
||||
Err(TryRecvError::Closed) => {
|
||||
warn!("Unexpected shutdown flow {flow_id}, shutdown anyway");
|
||||
break Ok(());
|
||||
}
|
||||
Err(TryRecvError::Empty) => (),
|
||||
}
|
||||
state.get_next_start_query_time(None)
|
||||
};
|
||||
tokio::time::sleep_until(sleep_until).await;
|
||||
}
|
||||
}
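    // Example of the statement built in `start_executing` above (illustrative; the sink table
    // name and filter values are made up): the per-tick query is simply the user's SQL wrapped
    // in an INSERT and narrowed to the dirty time windows, e.g.
    //
    //   INSERT INTO greptime.public.sink_table
    //   SELECT sum(number) AS sum_up, date_bin('5 minutes', ts) AS time_window
    //   FROM numbers_with_ts
    //   WHERE ts >= '1970-01-01 00:00:00' AND ts < '1970-01-01 00:05:00'
    //   GROUP BY time_window
    //
    // where the WHERE clause comes from `gen_query_with_time_window` below.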
|
||||
|
||||
/// Merge the dirty time windows and use at most the first `MAX_FILTER_NUM` of them in the query
|
||||
async fn gen_query_with_time_window(
|
||||
&self,
|
||||
engine: QueryEngineRef,
|
||||
) -> Result<Option<String>, Error> {
|
||||
let query_ctx = self.state.read().await.query_ctx.clone();
|
||||
let start = SystemTime::now();
|
||||
let since_the_epoch = start
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("Time went backwards");
|
||||
let low_bound = self
|
||||
.expire_after
|
||||
.map(|e| since_the_epoch.as_secs() - e as u64)
|
||||
.unwrap_or(u64::MIN);
|
||||
|
||||
let low_bound = Timestamp::new_second(low_bound as i64);
|
||||
|
||||
// TODO(discord9): use time window expr to get the precise expire lower bound
|
||||
let expire_time_window_bound = self
|
||||
.time_window_expr
|
||||
.as_ref()
|
||||
.map(|expr| expr.eval(low_bound))
|
||||
.transpose()?;
|
||||
|
||||
let new_sql = {
|
||||
let expr = {
|
||||
match expire_time_window_bound {
|
||||
Some((Some(l), Some(u))) => {
|
||||
let window_size = u.sub(&l).with_context(|| UnexpectedSnafu {
|
||||
reason: format!("Can't get window size from {u:?} - {l:?}"),
|
||||
})?;
|
||||
let col_name = self
|
||||
.time_window_expr
|
||||
.as_ref()
|
||||
.map(|expr| expr.column_name.clone())
|
||||
.with_context(|| UnexpectedSnafu {
|
||||
reason: format!(
|
||||
"Flow id={:?}, Failed to get column name from time window expr",
|
||||
self.flow_id
|
||||
),
|
||||
})?;
|
||||
|
||||
self.state
|
||||
.write()
|
||||
.await
|
||||
.dirty_time_windows
|
||||
.gen_filter_exprs(&col_name, Some(l), window_size, self)?
|
||||
}
|
||||
_ => {
|
||||
debug!(
|
||||
"Flow id = {:?}, can't get window size: precise_lower_bound={expire_time_window_bound:?}, using the same query", self.flow_id
|
||||
);
|
||||
// since no time window lower/upper bound is found, just return the original query
|
||||
return Ok(Some(self.query.clone()));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
debug!(
|
||||
"Flow id={:?}, Generated filter expr: {:?}",
|
||||
self.flow_id,
|
||||
expr.as_ref()
|
||||
.map(|expr| expr_to_sql(expr).with_context(|_| DatafusionSnafu {
|
||||
context: format!("Failed to generate filter expr from {expr:?}"),
|
||||
}))
|
||||
.transpose()?
|
||||
.map(|s| s.to_string())
|
||||
);
|
||||
|
||||
let Some(expr) = expr else {
|
||||
// no new data, hence no need to update
|
||||
debug!("Flow id={:?}, no new data, not update", self.flow_id);
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let mut add_filter = AddFilterRewriter::new(expr);
|
||||
// build an unoptimized plan so the unparsed SQL is clearer
|
||||
let plan =
|
||||
sql_to_df_plan(query_ctx.clone(), engine.clone(), &self.query, false).await?;
|
||||
let plan = plan
|
||||
.clone()
|
||||
.rewrite(&mut add_filter)
|
||||
.with_context(|_| DatafusionSnafu {
|
||||
context: format!("Failed to rewrite plan {plan:?}"),
|
||||
})?
|
||||
.data;
|
||||
df_plan_to_sql(&plan)?
|
||||
};
|
||||
|
||||
Ok(Some(new_sql))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct RecordingRuleState {
|
||||
query_ctx: QueryContextRef,
|
||||
/// time when the last query completed
last_update_time: Instant,
/// duration of the last query
last_query_duration: Duration,
/// Dirty time windows that need to be updated,
/// stored as a non-overlapping mapping of `start -> end`
|
||||
dirty_time_windows: DirtyTimeWindows,
|
||||
exec_state: ExecState,
|
||||
shutdown_rx: oneshot::Receiver<()>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct DirtyTimeWindows {
|
||||
windows: BTreeMap<Timestamp, Option<Timestamp>>,
|
||||
}
|
||||
|
||||
fn to_df_literal(value: Timestamp) -> Result<datafusion_common::ScalarValue, Error> {
|
||||
let value = Value::from(value);
|
||||
let value = value
|
||||
.try_to_scalar_value(&value.data_type())
|
||||
.with_context(|_| DatatypesSnafu {
|
||||
extra: format!("Failed to convert to scalar value: {}", value),
|
||||
})?;
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
impl DirtyTimeWindows {
|
||||
/// Time window merge distance
|
||||
const MERGE_DIST: i32 = 3;
|
||||
|
||||
/// Maximum number of filters allowed in a single query
|
||||
const MAX_FILTER_NUM: usize = 20;
|
||||
|
||||
/// Add lower bounds to the dirty time windows. Upper bounds are ignored.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `lower_bounds` - An iterator of lower bounds to be added.
|
||||
pub fn add_lower_bounds(&mut self, lower_bounds: impl Iterator<Item = Timestamp>) {
|
||||
for lower_bound in lower_bounds {
|
||||
let entry = self.windows.entry(lower_bound);
|
||||
entry.or_insert(None);
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate all filter expressions consuming all time windows
|
||||
pub fn gen_filter_exprs(
|
||||
&mut self,
|
||||
col_name: &str,
|
||||
expire_lower_bound: Option<Timestamp>,
|
||||
window_size: chrono::Duration,
|
||||
task_ctx: &RecordingRuleTask,
|
||||
) -> Result<Option<datafusion_expr::Expr>, Error> {
|
||||
debug!(
|
||||
"expire_lower_bound: {:?}, window_size: {:?}",
|
||||
expire_lower_bound.map(|t| t.to_iso8601_string()),
|
||||
window_size
|
||||
);
|
||||
self.merge_dirty_time_windows(window_size, expire_lower_bound)?;
|
||||
|
||||
if self.windows.len() > Self::MAX_FILTER_NUM {
|
||||
let first_time_window = self.windows.first_key_value();
|
||||
let last_time_window = self.windows.last_key_value();
|
||||
warn!(
|
||||
"Flow id = {:?}, too many time windows: {}, only the first {} are taken for this query, the group by expression might be wrong. Time window expr={:?}, expire_after={:?}, first_time_window={:?}, last_time_window={:?}, the original query: {:?}",
|
||||
task_ctx.flow_id,
|
||||
self.windows.len(),
|
||||
Self::MAX_FILTER_NUM,
|
||||
task_ctx.time_window_expr,
|
||||
task_ctx.expire_after,
|
||||
first_time_window,
|
||||
last_time_window,
|
||||
task_ctx.query
|
||||
);
|
||||
}
|
||||
|
||||
// get the first `MAX_FILTER_NUM` time windows
|
||||
let nth = self
|
||||
.windows
|
||||
.iter()
|
||||
.nth(Self::MAX_FILTER_NUM)
|
||||
.map(|(key, _)| *key);
|
||||
let first_nth = {
|
||||
if let Some(nth) = nth {
|
||||
let mut after = self.windows.split_off(&nth);
|
||||
std::mem::swap(&mut self.windows, &mut after);
|
||||
|
||||
after
|
||||
} else {
|
||||
std::mem::take(&mut self.windows)
|
||||
}
|
||||
};
|
||||
|
||||
let mut expr_lst = vec![];
|
||||
for (start, end) in first_nth.into_iter() {
|
||||
debug!(
|
||||
"Time window start: {:?}, end: {:?}",
|
||||
start.to_iso8601_string(),
|
||||
end.map(|t| t.to_iso8601_string())
|
||||
);
|
||||
|
||||
use datafusion_expr::{col, lit};
|
||||
let lower = to_df_literal(start)?;
|
||||
let upper = end.map(to_df_literal).transpose()?;
|
||||
let expr = if let Some(upper) = upper {
|
||||
col(col_name)
|
||||
.gt_eq(lit(lower))
|
||||
.and(col(col_name).lt(lit(upper)))
|
||||
} else {
|
||||
col(col_name).gt_eq(lit(lower))
|
||||
};
|
||||
expr_lst.push(expr);
|
||||
}
|
||||
let expr = expr_lst.into_iter().reduce(|a, b| a.or(b));
|
||||
Ok(expr)
|
||||
}
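As an aside, a minimal standalone sketch of how the per-window predicates above combine into one filter, assuming only the `datafusion_expr` crate and plain integer bounds instead of timestamp scalars (the column name and windows are made up for illustration):

use datafusion_expr::{col, lit, Expr};

/// Builds `(ts >= start AND ts < end) OR ...` for half-open windows, mirroring
/// the reduce-with-`or` step in `gen_filter_exprs`.
fn windows_to_filter(ts_col: &str, windows: &[(i64, i64)]) -> Option<Expr> {
    windows
        .iter()
        .map(|(start, end)| {
            col(ts_col)
                .gt_eq(lit(*start))
                .and(col(ts_col).lt(lit(*end)))
        })
        .reduce(|a, b| a.or(b))
}

// windows_to_filter("ts", &[(0, 300), (900, 1200)]) yields
// `ts >= 0 AND ts < 300 OR ts >= 900 AND ts < 1200`.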
|
||||
|
||||
/// Merge time windows that overlap or are too close together
|
||||
pub fn merge_dirty_time_windows(
|
||||
&mut self,
|
||||
window_size: chrono::Duration,
|
||||
expire_lower_bound: Option<Timestamp>,
|
||||
) -> Result<(), Error> {
|
||||
let mut new_windows = BTreeMap::new();
|
||||
|
||||
let mut prev_tw = None;
|
||||
for (lower_bound, upper_bound) in std::mem::take(&mut self.windows) {
|
||||
// filter out expired time window
|
||||
if let Some(expire_lower_bound) = expire_lower_bound {
|
||||
if lower_bound <= expire_lower_bound {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
let Some(prev_tw) = &mut prev_tw else {
|
||||
prev_tw = Some((lower_bound, upper_bound));
|
||||
continue;
|
||||
};
|
||||
|
||||
let std_window_size = window_size.to_std().map_err(|e| {
|
||||
InternalSnafu {
|
||||
reason: e.to_string(),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
|
||||
// if cur.lower - prev.upper <= window_size * 2, merge
|
||||
let prev_upper = prev_tw
|
||||
.1
|
||||
.unwrap_or(prev_tw.0.add_duration(std_window_size).context(TimeSnafu)?);
|
||||
prev_tw.1 = Some(prev_upper);
|
||||
|
||||
let cur_upper = upper_bound.unwrap_or(
|
||||
lower_bound
|
||||
.add_duration(std_window_size)
|
||||
.context(TimeSnafu)?,
|
||||
);
|
||||
|
||||
if lower_bound
|
||||
.sub(&prev_upper)
|
||||
.map(|dist| dist <= window_size * Self::MERGE_DIST)
|
||||
.unwrap_or(false)
|
||||
{
|
||||
prev_tw.1 = Some(cur_upper);
|
||||
} else {
|
||||
new_windows.insert(prev_tw.0, prev_tw.1);
|
||||
*prev_tw = (lower_bound, Some(cur_upper));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(prev_tw) = prev_tw {
|
||||
new_windows.insert(prev_tw.0, prev_tw.1);
|
||||
}
|
||||
|
||||
self.windows = new_windows;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
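For intuition, a simplified model of the merge rule above, using plain second offsets instead of `Timestamp` and ignoring the optional upper bounds; windows whose gap to the previous window is at most `MERGE_DIST * window_size` are coalesced:

use std::collections::BTreeMap;

fn merge_windows(lower_bounds: &[i64], window_size: i64, merge_dist: i64) -> BTreeMap<i64, i64> {
    let mut merged = BTreeMap::new();
    let mut prev: Option<(i64, i64)> = None;
    for &lower in lower_bounds {
        let upper = lower + window_size;
        prev = match prev {
            None => Some((lower, upper)),
            // close enough to the previous window: extend it
            Some((p_lo, p_up)) if lower - p_up <= window_size * merge_dist => Some((p_lo, upper)),
            // too far away: seal the previous window and start a new one
            Some((p_lo, p_up)) => {
                merged.insert(p_lo, p_up);
                Some((lower, upper))
            }
        };
    }
    if let Some((lo, up)) = prev {
        merged.insert(lo, up);
    }
    merged
}

// With window_size = 300 and merge_dist = 3 (matching the tests below):
// merge_windows(&[0, 1200], 300, 3) == {0: 1500}            (gap 900 <= 900, merged)
// merge_windows(&[0, 1500], 300, 3) == {0: 300, 1500: 1800} (gap 1200 > 900, kept apart)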
|
||||
|
||||
impl RecordingRuleState {
|
||||
pub fn new(query_ctx: QueryContextRef, shutdown_rx: oneshot::Receiver<()>) -> Self {
|
||||
Self {
|
||||
query_ctx,
|
||||
last_update_time: Instant::now(),
|
||||
last_query_duration: Duration::from_secs(0),
|
||||
dirty_time_windows: Default::default(),
|
||||
exec_state: ExecState::Idle,
|
||||
shutdown_rx,
|
||||
}
|
||||
}
|
||||
|
||||
/// Called after the last query is done.
/// `is_succ` indicates whether the last query succeeded.
|
||||
pub fn after_query_exec(&mut self, elapsed: Duration, _is_succ: bool) {
|
||||
self.exec_state = ExecState::Idle;
|
||||
self.last_query_duration = elapsed;
|
||||
self.last_update_time = Instant::now();
|
||||
}
|
||||
|
||||
/// Wait at least `MIN_REFRESH_DURATION` and at most the smaller of `last_query_duration` and `max_timeout` before starting the next query
|
||||
pub fn get_next_start_query_time(&self, max_timeout: Option<Duration>) -> Instant {
|
||||
let next_duration = max_timeout
|
||||
.unwrap_or(self.last_query_duration)
|
||||
.min(self.last_query_duration);
|
||||
let next_duration = next_duration.max(MIN_REFRESH_DURATION);
|
||||
|
||||
self.last_update_time + next_duration
|
||||
}
|
||||
}
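A small sketch of the scheduling rule above; the `MIN_REFRESH_DURATION` value used here is a placeholder assumption (the real constant lives elsewhere in this crate):

use std::time::{Duration, Instant};

// Assumed placeholder value, for illustration only.
const MIN_REFRESH_DURATION: Duration = Duration::from_secs(5);

fn next_start(last_update: Instant, last_query: Duration, max_timeout: Option<Duration>) -> Instant {
    // wait roughly as long as the last query took, capped by `max_timeout`
    // and floored by `MIN_REFRESH_DURATION`
    let wait = max_timeout
        .unwrap_or(last_query)
        .min(last_query)
        .max(MIN_REFRESH_DURATION);
    last_update + wait
}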
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
enum ExecState {
|
||||
Idle,
|
||||
Executing,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_merge_dirty_time_windows() {
|
||||
let mut dirty = DirtyTimeWindows::default();
|
||||
dirty.add_lower_bounds(
|
||||
vec![
|
||||
Timestamp::new_second(0),
|
||||
Timestamp::new_second((1 + DirtyTimeWindows::MERGE_DIST as i64) * 5 * 60),
|
||||
]
|
||||
.into_iter(),
|
||||
);
|
||||
dirty
|
||||
.merge_dirty_time_windows(chrono::Duration::seconds(5 * 60), None)
|
||||
.unwrap();
|
||||
// just enough to merge
|
||||
assert_eq!(
|
||||
dirty.windows,
|
||||
BTreeMap::from([(
|
||||
Timestamp::new_second(0),
|
||||
Some(Timestamp::new_second(
|
||||
(2 + DirtyTimeWindows::MERGE_DIST as i64) * 5 * 60
|
||||
))
|
||||
)])
|
||||
);
|
||||
|
||||
// separate time window
|
||||
let mut dirty = DirtyTimeWindows::default();
|
||||
dirty.add_lower_bounds(
|
||||
vec![
|
||||
Timestamp::new_second(0),
|
||||
Timestamp::new_second((2 + DirtyTimeWindows::MERGE_DIST as i64) * 5 * 60),
|
||||
]
|
||||
.into_iter(),
|
||||
);
|
||||
dirty
|
||||
.merge_dirty_time_windows(chrono::Duration::seconds(5 * 60), None)
|
||||
.unwrap();
|
||||
// too far apart to merge; the windows stay separate
|
||||
assert_eq!(
|
||||
BTreeMap::from([
|
||||
(
|
||||
Timestamp::new_second(0),
|
||||
Some(Timestamp::new_second(5 * 60))
|
||||
),
|
||||
(
|
||||
Timestamp::new_second((2 + DirtyTimeWindows::MERGE_DIST as i64) * 5 * 60),
|
||||
Some(Timestamp::new_second(
|
||||
(3 + DirtyTimeWindows::MERGE_DIST as i64) * 5 * 60
|
||||
))
|
||||
)
|
||||
]),
|
||||
dirty.windows
|
||||
);
|
||||
|
||||
// overlapping
|
||||
let mut dirty = DirtyTimeWindows::default();
|
||||
dirty.add_lower_bounds(
|
||||
vec![
|
||||
Timestamp::new_second(0),
|
||||
Timestamp::new_second((DirtyTimeWindows::MERGE_DIST as i64) * 5 * 60),
|
||||
]
|
||||
.into_iter(),
|
||||
);
|
||||
dirty
|
||||
.merge_dirty_time_windows(chrono::Duration::seconds(5 * 60), None)
|
||||
.unwrap();
|
||||
// close enough to merge into a single window
|
||||
assert_eq!(
|
||||
BTreeMap::from([(
|
||||
Timestamp::new_second(0),
|
||||
Some(Timestamp::new_second(
|
||||
(1 + DirtyTimeWindows::MERGE_DIST as i64) * 5 * 60
|
||||
))
|
||||
),]),
|
||||
dirty.windows
|
||||
);
|
||||
|
||||
// expired
|
||||
let mut dirty = DirtyTimeWindows::default();
|
||||
dirty.add_lower_bounds(
|
||||
vec![
|
||||
Timestamp::new_second(0),
|
||||
Timestamp::new_second((DirtyTimeWindows::MERGE_DIST as i64) * 5 * 60),
|
||||
]
|
||||
.into_iter(),
|
||||
);
|
||||
dirty
|
||||
.merge_dirty_time_windows(
|
||||
chrono::Duration::seconds(5 * 60),
|
||||
Some(Timestamp::new_second(
|
||||
(DirtyTimeWindows::MERGE_DIST as i64) * 6 * 60,
|
||||
)),
|
||||
)
|
||||
.unwrap();
|
||||
// both lower bounds are at or before the expire lower bound, so all windows are dropped
|
||||
assert_eq!(BTreeMap::from([]), dirty.windows);
|
||||
}
|
||||
}
|
||||
163 src/flow/src/recording_rules/frontend_client.rs Normal file
@@ -0,0 +1,163 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Frontend client used to run a flow as a recording rule: a time-window-aware regular query triggered on every tick configured by the user
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
|
||||
use common_meta::cluster::{NodeInfo, NodeInfoKey, Role};
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::rpc::store::RangeRequest;
|
||||
use meta_client::client::MetaClient;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{ExternalSnafu, UnexpectedSnafu};
|
||||
use crate::recording_rules::engine::DEFAULT_RULE_ENGINE_QUERY_TIMEOUT;
|
||||
use crate::Error;
|
||||
|
||||
fn default_channel_mgr() -> ChannelManager {
|
||||
let cfg = ChannelConfig::new().timeout(DEFAULT_RULE_ENGINE_QUERY_TIMEOUT);
|
||||
ChannelManager::with_config(cfg)
|
||||
}
|
||||
|
||||
fn client_from_urls(addrs: Vec<String>) -> Client {
|
||||
Client::with_manager_and_urls(default_channel_mgr(), addrs)
|
||||
}
|
||||
|
||||
/// A simple frontend client that can execute SQL over the gRPC protocol
|
||||
#[derive(Debug)]
|
||||
pub enum FrontendClient {
|
||||
Distributed {
|
||||
meta_client: Arc<MetaClient>,
|
||||
channel_mgr: ChannelManager,
|
||||
},
|
||||
Standalone {
|
||||
/// For simplicity this still uses gRPC even in standalone mode.
/// Note that the client here should be lazy, so it can wait until the frontend is booted before connecting.
|
||||
/// TODO(discord9): not use grpc under standalone mode
|
||||
database_client: DatabaseWithPeer,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DatabaseWithPeer {
|
||||
pub database: Database,
|
||||
pub peer: Peer,
|
||||
}
|
||||
|
||||
impl DatabaseWithPeer {
|
||||
fn new(database: Database, peer: Peer) -> Self {
|
||||
Self { database, peer }
|
||||
}
|
||||
}
|
||||
|
||||
impl FrontendClient {
|
||||
pub fn from_meta_client(meta_client: Arc<MetaClient>) -> Self {
|
||||
Self::Distributed {
|
||||
meta_client,
|
||||
channel_mgr: default_channel_mgr(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_static_grpc_addr(addr: String) -> Self {
|
||||
let peer = Peer {
|
||||
id: 0,
|
||||
addr: addr.clone(),
|
||||
};
|
||||
|
||||
let mgr = default_channel_mgr();
|
||||
let client = Client::with_manager_and_urls(mgr.clone(), vec![addr]);
|
||||
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
Self::Standalone {
|
||||
database_client: DatabaseWithPeer::new(database, peer),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FrontendClient {
|
||||
async fn scan_for_frontend(&self) -> Result<Vec<(NodeInfoKey, NodeInfo)>, Error> {
|
||||
let Self::Distributed { meta_client, .. } = self else {
|
||||
return Ok(vec![]);
|
||||
};
|
||||
let cluster_client = meta_client
|
||||
.cluster_client()
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
let cluster_id = meta_client.id().0;
|
||||
let prefix = NodeInfoKey::key_prefix_with_role(cluster_id, Role::Frontend);
|
||||
let req = RangeRequest::new().with_prefix(prefix);
|
||||
let resp = cluster_client
|
||||
.range(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
let mut res = Vec::with_capacity(resp.kvs.len());
|
||||
for kv in resp.kvs {
|
||||
let key = NodeInfoKey::try_from(kv.key)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
|
||||
let val = NodeInfo::try_from(kv.value)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
res.push((key, val));
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Get the database with max `last_activity_ts`
|
||||
async fn get_last_active_frontend(&self) -> Result<DatabaseWithPeer, Error> {
|
||||
if let Self::Standalone { database_client } = self {
|
||||
return Ok(database_client.clone());
|
||||
}
|
||||
match &self {
|
||||
Self::Standalone { database_client } => Ok(database_client.clone()),
|
||||
Self::Distributed {
|
||||
meta_client: _,
|
||||
channel_mgr,
|
||||
} => {
|
||||
let frontends = self.scan_for_frontend().await?;
|
||||
let mut last_activity_ts = i64::MIN;
|
||||
let mut peer = None;
|
||||
for (_key, val) in frontends.iter() {
|
||||
if val.last_activity_ts > last_activity_ts {
|
||||
last_activity_ts = val.last_activity_ts;
|
||||
peer = Some(val.peer.clone());
|
||||
}
|
||||
}
|
||||
let Some(peer) = peer else {
|
||||
UnexpectedSnafu {
|
||||
reason: format!("No frontend available: {:?}", frontends),
|
||||
}
|
||||
.fail()?
|
||||
};
|
||||
let client =
|
||||
Client::with_manager_and_urls(channel_mgr.clone(), vec![peer.addr.clone()]);
|
||||
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
Ok(DatabaseWithPeer::new(database, peer))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a database client, and possibly update it before returning.
|
||||
pub async fn get_database_client(&self) -> Result<DatabaseWithPeer, Error> {
|
||||
match self {
|
||||
Self::Standalone { database_client } => Ok(database_client.clone()),
|
||||
Self::Distributed { meta_client: _, .. } => self.get_last_active_frontend().await,
|
||||
}
|
||||
}
|
||||
}
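The "last active frontend" selection above boils down to a max-by-key scan; a hypothetical, self-contained version of just that step (the `Candidate` type is made up for illustration):

struct Candidate {
    addr: String,
    last_activity_ts: i64,
}

/// Picks the frontend with the most recent heartbeat activity, if any.
fn pick_last_active(frontends: &[Candidate]) -> Option<&Candidate> {
    frontends.iter().max_by_key(|c| c.last_activity_ts)
}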
|
||||
@@ -57,6 +57,7 @@ use crate::error::{
|
||||
};
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::metrics::{METRIC_FLOW_PROCESSING_TIME, METRIC_FLOW_ROWS};
|
||||
use crate::recording_rules::{FrontendClient, RecordingRuleEngine};
|
||||
use crate::transform::register_function_to_query_engine;
|
||||
use crate::utils::{SizeReportSender, StateReportHandler};
|
||||
use crate::{Error, FlowWorkerManager, FlownodeOptions};
|
||||
@@ -245,6 +246,7 @@ impl FlownodeInstance {
|
||||
self.server.shutdown().await.context(ShutdownServerSnafu)?;
|
||||
|
||||
if let Some(task) = &self.heartbeat_task {
|
||||
info!("Close heartbeat task for flownode");
|
||||
task.shutdown();
|
||||
}
|
||||
|
||||
@@ -271,6 +273,8 @@ pub struct FlownodeBuilder {
|
||||
heartbeat_task: Option<HeartbeatTask>,
|
||||
/// receive a oneshot sender to send state size report
|
||||
state_report_handler: Option<StateReportHandler>,
|
||||
/// Client to send sql to frontend
|
||||
frontend_client: Arc<FrontendClient>,
|
||||
}
|
||||
|
||||
impl FlownodeBuilder {
|
||||
@@ -281,6 +285,7 @@ impl FlownodeBuilder {
|
||||
table_meta: TableMetadataManagerRef,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
flow_metadata_manager: FlowMetadataManagerRef,
|
||||
frontend_client: Arc<FrontendClient>,
|
||||
) -> Self {
|
||||
Self {
|
||||
opts,
|
||||
@@ -290,6 +295,7 @@ impl FlownodeBuilder {
|
||||
flow_metadata_manager,
|
||||
heartbeat_task: None,
|
||||
state_report_handler: None,
|
||||
frontend_client,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -447,7 +453,14 @@ impl FlownodeBuilder {
|
||||
|
||||
let node_id = self.opts.node_id.map(|id| id as u32);
|
||||
|
||||
let mut man = FlowWorkerManager::new(node_id, query_engine, table_meta);
|
||||
let rule_engine = RecordingRuleEngine::new(
|
||||
self.frontend_client.clone(),
|
||||
query_engine.clone(),
|
||||
self.flow_metadata_manager.clone(),
|
||||
table_meta.clone(),
|
||||
);
|
||||
|
||||
let mut man = FlowWorkerManager::new(node_id, query_engine, table_meta, rule_engine);
|
||||
for worker_id in 0..num_workers {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
|
||||
@@ -86,7 +86,8 @@ pub fn create_test_query_engine() -> Arc<dyn QueryEngine> {
|
||||
|
||||
let schema = vec![
|
||||
datatypes::schema::ColumnSchema::new("number", CDT::uint32_datatype(), false),
|
||||
datatypes::schema::ColumnSchema::new("ts", CDT::timestamp_millisecond_datatype(), false),
|
||||
datatypes::schema::ColumnSchema::new("ts", CDT::timestamp_millisecond_datatype(), false)
|
||||
.with_time_index(true),
|
||||
];
|
||||
let mut columns = vec![];
|
||||
let numbers = (1..=10).collect_vec();
|
||||
@@ -114,6 +115,37 @@ pub fn create_test_query_engine() -> Arc<dyn QueryEngine> {
|
||||
};
|
||||
catalog_list.register_table_sync(req_with_ts).unwrap();
|
||||
|
||||
let schema = vec![
|
||||
datatypes::schema::ColumnSchema::new("NUMBER", CDT::uint32_datatype(), false),
|
||||
datatypes::schema::ColumnSchema::new("ts", CDT::timestamp_millisecond_datatype(), false)
|
||||
.with_time_index(true),
|
||||
];
|
||||
let mut columns = vec![];
|
||||
let numbers = (1..=10).collect_vec();
|
||||
let column: VectorRef = Arc::new(<u32 as Scalar>::VectorType::from_vec(numbers));
|
||||
columns.push(column);
|
||||
|
||||
let ts = (1..=10).collect_vec();
|
||||
let mut builder = TimestampMillisecondVectorBuilder::with_capacity(10);
|
||||
ts.into_iter()
|
||||
.map(|v| builder.push(Some(TimestampMillisecond::new(v))))
|
||||
.count();
|
||||
let column: VectorRef = builder.to_vector_cloned();
|
||||
columns.push(column);
|
||||
|
||||
let schema = Arc::new(Schema::new(schema));
|
||||
let recordbatch = common_recordbatch::RecordBatch::new(schema, columns).unwrap();
|
||||
let table = MemTable::table("UPPERCASE_NUMBERS_WITH_TS", recordbatch);
|
||||
|
||||
let req_with_ts = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: "UPPERCASE_NUMBERS_WITH_TS".to_string(),
|
||||
table_id: 1025,
|
||||
table,
|
||||
};
|
||||
catalog_list.register_table_sync(req_with_ts).unwrap();
|
||||
|
||||
let factory = query::QueryEngineFactory::new(catalog_list, None, None, None, None, false);
|
||||
|
||||
let engine = factory.query_engine();
|
||||
|
||||
@@ -112,6 +112,7 @@ impl MetaClientBuilder {
|
||||
.enable_store()
|
||||
.enable_heartbeat()
|
||||
.enable_procedure()
|
||||
.enable_access_cluster_info()
|
||||
}
|
||||
|
||||
pub fn enable_heartbeat(self) -> Self {
|
||||
|
||||
@@ -198,13 +198,13 @@ impl Inner {
|
||||
}
|
||||
);
|
||||
|
||||
let leader = self
|
||||
let leader_addr = self
|
||||
.ask_leader
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.get_leader()
|
||||
.context(error::NoLeaderSnafu)?;
|
||||
let mut leader = self.make_client(leader)?;
|
||||
let mut leader = self.make_client(&leader_addr)?;
|
||||
|
||||
let (sender, receiver) = mpsc::channel::<HeartbeatRequest>(128);
|
||||
|
||||
@@ -236,7 +236,11 @@ impl Inner {
|
||||
.await
|
||||
.map_err(error::Error::from)?
|
||||
.context(error::CreateHeartbeatStreamSnafu)?;
|
||||
info!("Success to create heartbeat stream to server: {:#?}", res);
|
||||
|
||||
info!(
|
||||
"Success to create heartbeat stream to server: {}, response: {:#?}",
|
||||
leader_addr, res
|
||||
);
|
||||
|
||||
Ok((
|
||||
HeartbeatSender::new(self.id, self.role, sender),
|
||||
|
||||
@@ -7,6 +7,7 @@ license.workspace = true
|
||||
[features]
|
||||
mock = []
|
||||
pg_kvbackend = ["dep:tokio-postgres", "common-meta/pg_kvbackend"]
|
||||
mysql_kvbackend = [] # placeholder feature so CI can compile
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
@@ -23,8 +23,8 @@ pub struct CheckLeaderHandler;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl HeartbeatHandler for CheckLeaderHandler {
|
||||
fn is_acceptable(&self, role: Role) -> bool {
|
||||
role == Role::Datanode
|
||||
fn is_acceptable(&self, _role: Role) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
async fn handle(
|
||||
|
||||
@@ -157,7 +157,7 @@ fn extract_base_info(request: &HeartbeatRequest) -> Option<(NodeInfoKey, Peer, P
|
||||
}
|
||||
|
||||
async fn put_into_memory_store(ctx: &mut Context, key: NodeInfoKey, value: NodeInfo) -> Result<()> {
|
||||
let key = key.into();
|
||||
let key = (&key).into();
|
||||
let value = value.try_into().context(InvalidClusterInfoFormatSnafu)?;
|
||||
let put_req = PutRequest {
|
||||
key,
|
||||
|
||||
@@ -32,6 +32,7 @@ use common_meta::kv_backend::{KvBackendRef, ResettableKvBackend, ResettableKvBac
|
||||
use common_meta::leadership_notifier::{
|
||||
LeadershipChangeNotifier, LeadershipChangeNotifierCustomizerRef,
|
||||
};
|
||||
use common_meta::node_expiry_listener::NodeExpiryListener;
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::region_keeper::MemoryRegionKeeperRef;
|
||||
use common_meta::wal_options_allocator::WalOptionsAllocatorRef;
|
||||
@@ -151,6 +152,8 @@ pub struct MetasrvOptions {
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
/// Lock id for meta kv election. Only effect when using pg_kvbackend.
|
||||
pub meta_election_lock_id: u64,
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub node_max_idle_time: Duration,
|
||||
}
|
||||
|
||||
const DEFAULT_METASRV_ADDR_PORT: &str = "3002";
|
||||
@@ -192,6 +195,7 @@ impl Default for MetasrvOptions {
|
||||
meta_table_name: DEFAULT_META_TABLE_NAME.to_string(),
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
meta_election_lock_id: DEFAULT_META_ELECTION_LOCK_ID,
|
||||
node_max_idle_time: Duration::from_secs(24 * 60 * 60),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -442,6 +446,10 @@ impl Metasrv {
|
||||
leadership_change_notifier.add_listener(self.wal_options_allocator.clone());
|
||||
leadership_change_notifier
|
||||
.add_listener(Arc::new(ProcedureManagerListenerAdapter(procedure_manager)));
|
||||
leadership_change_notifier.add_listener(Arc::new(NodeExpiryListener::new(
|
||||
self.options.node_max_idle_time,
|
||||
self.in_memory.clone(),
|
||||
)));
|
||||
if let Some(region_supervisor_ticker) = &self.region_supervisor_ticker {
|
||||
leadership_change_notifier.add_listener(region_supervisor_ticker.clone() as _);
|
||||
}
|
||||
|
||||
@@ -68,13 +68,15 @@ impl heartbeat_server::Heartbeat for Metasrv {
|
||||
};
|
||||
|
||||
if pusher_id.is_none() {
|
||||
pusher_id = register_pusher(&handler_group, header, tx.clone()).await;
|
||||
pusher_id =
|
||||
Some(register_pusher(&handler_group, header, tx.clone()).await);
|
||||
}
|
||||
if let Some(k) = &pusher_id {
|
||||
METRIC_META_HEARTBEAT_RECV.with_label_values(&[&k.to_string()]);
|
||||
} else {
|
||||
METRIC_META_HEARTBEAT_RECV.with_label_values(&["none"]);
|
||||
}
|
||||
|
||||
let res = handler_group
|
||||
.handle(req, ctx.clone())
|
||||
.await
|
||||
@@ -173,13 +175,13 @@ async fn register_pusher(
|
||||
handler_group: &HeartbeatHandlerGroup,
|
||||
header: &RequestHeader,
|
||||
sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
|
||||
) -> Option<PusherId> {
|
||||
) -> PusherId {
|
||||
let role = header.role();
|
||||
let id = get_node_id(header);
|
||||
let pusher_id = PusherId::new(role, id);
|
||||
let pusher = Pusher::new(sender, header);
|
||||
handler_group.register_pusher(pusher_id, pusher).await;
|
||||
Some(pusher_id)
|
||||
pusher_id
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -17,13 +17,15 @@ use std::time::Duration;
|
||||
|
||||
use api::v1::meta::{
|
||||
procedure_service_server, DdlTaskRequest as PbDdlTaskRequest,
|
||||
DdlTaskResponse as PbDdlTaskResponse, MigrateRegionRequest, MigrateRegionResponse,
|
||||
DdlTaskResponse as PbDdlTaskResponse, Error, MigrateRegionRequest, MigrateRegionResponse,
|
||||
ProcedureDetailRequest, ProcedureDetailResponse, ProcedureStateResponse, QueryProcedureRequest,
|
||||
ResponseHeader,
|
||||
};
|
||||
use common_meta::ddl::ExecutorContext;
|
||||
use common_meta::rpc::ddl::{DdlTask, SubmitDdlTaskRequest};
|
||||
use common_meta::rpc::procedure;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use common_telemetry::warn;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tonic::{Request, Response};
|
||||
|
||||
use super::GrpcResult;
|
||||
@@ -37,6 +39,16 @@ impl procedure_service_server::ProcedureService for Metasrv {
|
||||
&self,
|
||||
request: Request<QueryProcedureRequest>,
|
||||
) -> GrpcResult<ProcedureStateResponse> {
|
||||
if !self.is_leader() {
|
||||
let resp = ProcedureStateResponse {
|
||||
header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
warn!("The current meta is not leader, but a `query procedure state` request have reached the meta. Detail: {:?}.", request);
|
||||
return Ok(Response::new(resp));
|
||||
}
|
||||
|
||||
let QueryProcedureRequest { header, pid, .. } = request.into_inner();
|
||||
let _header = header.context(error::MissingRequestHeaderSnafu)?;
|
||||
let pid = pid.context(error::MissingRequiredParameterSnafu { param: "pid" })?;
|
||||
@@ -57,6 +69,16 @@ impl procedure_service_server::ProcedureService for Metasrv {
|
||||
}
|
||||
|
||||
async fn ddl(&self, request: Request<PbDdlTaskRequest>) -> GrpcResult<PbDdlTaskResponse> {
|
||||
if !self.is_leader() {
|
||||
let resp = PbDdlTaskResponse {
|
||||
header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
warn!("The current meta is not leader, but a `ddl` request have reached the meta. Detail: {:?}.", request);
|
||||
return Ok(Response::new(resp));
|
||||
}
|
||||
|
||||
let PbDdlTaskRequest {
|
||||
header,
|
||||
query_context,
|
||||
@@ -99,12 +121,15 @@ impl procedure_service_server::ProcedureService for Metasrv {
|
||||
&self,
|
||||
request: Request<MigrateRegionRequest>,
|
||||
) -> GrpcResult<MigrateRegionResponse> {
|
||||
ensure!(
|
||||
self.meta_peer_client().is_leader(),
|
||||
error::UnexpectedSnafu {
|
||||
violated: "Trying to submit a region migration procedure to non-leader meta server"
|
||||
}
|
||||
);
|
||||
if !self.is_leader() {
|
||||
let resp = MigrateRegionResponse {
|
||||
header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
warn!("The current meta is not leader, but a `migrate` request have reached the meta. Detail: {:?}.", request);
|
||||
return Ok(Response::new(resp));
|
||||
}
|
||||
|
||||
let MigrateRegionRequest {
|
||||
header,
|
||||
@@ -150,6 +175,16 @@ impl procedure_service_server::ProcedureService for Metasrv {
|
||||
&self,
|
||||
request: Request<ProcedureDetailRequest>,
|
||||
) -> GrpcResult<ProcedureDetailResponse> {
|
||||
if !self.is_leader() {
|
||||
let resp = ProcedureDetailResponse {
|
||||
header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
warn!("The current meta is not leader, but a `procedure details` request have reached the meta. Detail: {:?}.", request);
|
||||
return Ok(Response::new(resp));
|
||||
}
|
||||
|
||||
let ProcedureDetailRequest { header } = request.into_inner();
|
||||
let _header = header.context(error::MissingRequestHeaderSnafu)?;
|
||||
let metas = self
|
||||
|
||||
@@ -30,9 +30,10 @@ impl MetricEngineInner {
|
||||
pub async fn drop_region(
|
||||
&self,
|
||||
region_id: RegionId,
|
||||
_req: RegionDropRequest,
|
||||
req: RegionDropRequest,
|
||||
) -> Result<AffectedRows> {
|
||||
let data_region_id = utils::to_data_region_id(region_id);
|
||||
let fast_path = req.fast_path;
|
||||
|
||||
// enclose the guard in a block to prevent the guard from polluting the async context
|
||||
let (is_physical_region, is_physical_region_busy) = {
|
||||
@@ -52,7 +53,7 @@ impl MetricEngineInner {
|
||||
|
||||
if is_physical_region {
|
||||
// check whether any logical region still relates to this physical region
|
||||
if is_physical_region_busy {
|
||||
if is_physical_region_busy && !fast_path {
|
||||
// reject if there is any present logical region
|
||||
return Err(PhysicalRegionBusySnafu {
|
||||
region_id: data_region_id,
|
||||
@@ -60,9 +61,21 @@ impl MetricEngineInner {
|
||||
.build());
|
||||
}
|
||||
|
||||
self.drop_physical_region(data_region_id).await
|
||||
return self.drop_physical_region(data_region_id).await;
|
||||
}
|
||||
|
||||
if fast_path {
|
||||
// For the fast path, we don't delete the metadata in the metadata region;
// we only remove the logical region from the engine state.
|
||||
//
|
||||
// The drop database procedure will ensure the metadata region and data region are dropped eventually.
|
||||
self.state
|
||||
.write()
|
||||
.unwrap()
|
||||
.remove_logical_region(region_id)?;
|
||||
|
||||
Ok(0)
|
||||
} else {
|
||||
// cannot merge these two `if` blocks, otherwise the type checker will complain
|
||||
let metadata_region_id = self
|
||||
.state
|
||||
.read()
|
||||
@@ -87,13 +100,16 @@ impl MetricEngineInner {
|
||||
// Since the physical regions are going to be dropped, we don't need to
|
||||
// update the contents in metadata region.
|
||||
self.mito
|
||||
.handle_request(data_region_id, RegionRequest::Drop(RegionDropRequest {}))
|
||||
.handle_request(
|
||||
data_region_id,
|
||||
RegionRequest::Drop(RegionDropRequest { fast_path: false }),
|
||||
)
|
||||
.await
|
||||
.with_context(|_| CloseMitoRegionSnafu { region_id })?;
|
||||
self.mito
|
||||
.handle_request(
|
||||
metadata_region_id,
|
||||
RegionRequest::Drop(RegionDropRequest {}),
|
||||
RegionRequest::Drop(RegionDropRequest { fast_path: false }),
|
||||
)
|
||||
.await
|
||||
.with_context(|_| CloseMitoRegionSnafu { region_id })?;
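To summarize the `drop_region` branches above, an illustrative decision table; the enum and function are hypothetical and not part of the engine:

enum DropOutcome {
    RejectBusyPhysical,
    DropPhysical,
    DetachLogicalOnly,
    DropLogicalFully,
}

fn decide(is_physical: bool, is_busy: bool, fast_path: bool) -> DropOutcome {
    match (is_physical, is_busy, fast_path) {
        // a busy physical region is only rejected on the slow path
        (true, true, false) => DropOutcome::RejectBusyPhysical,
        (true, _, _) => DropOutcome::DropPhysical,
        // fast path: only detach the logical region from the engine state
        (false, _, true) => DropOutcome::DetachLogicalOnly,
        // slow path: drop metadata entries, then the data and metadata regions
        (false, _, false) => DropOutcome::DropLogicalFully,
    }
}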
|
||||
|
||||
3 src/mito2/src/cache/file_cache.rs vendored
@@ -187,9 +187,12 @@ impl FileCache {
|
||||
}
|
||||
|
||||
/// Removes a file from the cache explicitly.
|
||||
/// It always tries to remove the file from the local store because the file may not be
/// in the memory index if the upload failed.
|
||||
pub(crate) async fn remove(&self, key: IndexKey) {
|
||||
let file_path = self.cache_file_path(key);
|
||||
self.memory_index.remove(&key).await;
|
||||
// Always delete the file from the local store.
|
||||
if let Err(e) = self.local_store.delete(&file_path).await {
|
||||
warn!(e; "Failed to delete a cached file {}", file_path);
|
||||
}
|
||||
|
||||
81 src/mito2/src/cache/write_cache.rs vendored
@@ -22,6 +22,7 @@ use common_telemetry::{debug, info};
|
||||
use futures::AsyncWriteExt;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use crate::access_layer::{
|
||||
new_fs_cache_store, FilePathProvider, RegionFilePathFactory, SstInfoArray, SstWriteRequest,
|
||||
@@ -149,24 +150,41 @@ impl WriteCache {
|
||||
return Ok(sst_info);
|
||||
}
|
||||
|
||||
let mut upload_tracker = UploadTracker::new(region_id);
|
||||
let mut err = None;
|
||||
let remote_store = &upload_request.remote_store;
|
||||
for sst in &sst_info {
|
||||
let parquet_key = IndexKey::new(region_id, sst.file_id, FileType::Parquet);
|
||||
let parquet_path = upload_request
|
||||
.dest_path_provider
|
||||
.build_sst_file_path(sst.file_id);
|
||||
self.upload(parquet_key, &parquet_path, remote_store)
|
||||
.await?;
|
||||
if let Err(e) = self.upload(parquet_key, &parquet_path, remote_store).await {
|
||||
err = Some(e);
|
||||
break;
|
||||
}
|
||||
upload_tracker.push_uploaded_file(parquet_path);
|
||||
|
||||
if sst.index_metadata.file_size > 0 {
|
||||
let puffin_key = IndexKey::new(region_id, sst.file_id, FileType::Puffin);
|
||||
let puffin_path = &upload_request
|
||||
let puffin_path = upload_request
|
||||
.dest_path_provider
|
||||
.build_index_file_path(sst.file_id);
|
||||
self.upload(puffin_key, puffin_path, remote_store).await?;
|
||||
if let Err(e) = self.upload(puffin_key, &puffin_path, remote_store).await {
|
||||
err = Some(e);
|
||||
break;
|
||||
}
|
||||
upload_tracker.push_uploaded_file(puffin_path);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(err) = err {
|
||||
// Cleans files on failure.
|
||||
upload_tracker
|
||||
.clean(&sst_info, &self.file_cache, remote_store)
|
||||
.await;
|
||||
return Err(err);
|
||||
}
|
||||
|
||||
Ok(sst_info)
|
||||
}
|
||||
|
||||
@@ -332,6 +350,61 @@ pub struct SstUploadRequest {
|
||||
pub remote_store: ObjectStore,
|
||||
}
|
||||
|
||||
/// A struct to track uploaded files and clean them up if an upload fails.
|
||||
struct UploadTracker {
|
||||
/// Id of the region to track.
|
||||
region_id: RegionId,
|
||||
/// Paths of files uploaded successfully.
|
||||
files_uploaded: Vec<String>,
|
||||
}
|
||||
|
||||
impl UploadTracker {
|
||||
/// Creates a new instance of `UploadTracker` for a given region.
|
||||
fn new(region_id: RegionId) -> Self {
|
||||
Self {
|
||||
region_id,
|
||||
files_uploaded: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Add a file path to the list of uploaded files.
|
||||
fn push_uploaded_file(&mut self, path: String) {
|
||||
self.files_uploaded.push(path);
|
||||
}
|
||||
|
||||
/// Cleans uploaded files and files in the file cache on a best-effort basis.
|
||||
async fn clean(
|
||||
&self,
|
||||
sst_info: &SstInfoArray,
|
||||
file_cache: &FileCacheRef,
|
||||
remote_store: &ObjectStore,
|
||||
) {
|
||||
common_telemetry::info!(
|
||||
"Start cleaning files on upload failure, region: {}, num_ssts: {}",
|
||||
self.region_id,
|
||||
sst_info.len()
|
||||
);
|
||||
|
||||
// Cleans files in the file cache first.
|
||||
for sst in sst_info {
|
||||
let parquet_key = IndexKey::new(self.region_id, sst.file_id, FileType::Parquet);
|
||||
file_cache.remove(parquet_key).await;
|
||||
|
||||
if sst.index_metadata.file_size > 0 {
|
||||
let puffin_key = IndexKey::new(self.region_id, sst.file_id, FileType::Puffin);
|
||||
file_cache.remove(puffin_key).await;
|
||||
}
|
||||
}
|
||||
|
||||
// Cleans uploaded files.
|
||||
for file_path in &self.files_uploaded {
|
||||
if let Err(e) = remote_store.delete(file_path).await {
|
||||
common_telemetry::error!(e; "Failed to delete file {}", file_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
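The tracker above implements a record-successes-then-clean-on-failure pattern; a generic sketch of that shape, with `upload` and `delete` as stand-ins for the real store calls:

fn upload_with_cleanup<E>(
    paths: Vec<String>,
    upload: impl Fn(&str) -> Result<(), E>,
    delete: impl Fn(&str),
) -> Result<(), E> {
    let mut uploaded = Vec::new();
    for path in &paths {
        if let Err(e) = upload(path) {
            // best-effort cleanup of everything uploaded so far
            for done in &uploaded {
                delete(done);
            }
            return Err(e);
        }
        uploaded.push(path.clone());
    }
    Ok(())
}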
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
|
||||
@@ -56,7 +56,10 @@ async fn test_engine_drop_region() {
|
||||
|
||||
// It's okay to drop a region that doesn't exist.
|
||||
engine
|
||||
.handle_request(region_id, RegionRequest::Drop(RegionDropRequest {}))
|
||||
.handle_request(
|
||||
region_id,
|
||||
RegionRequest::Drop(RegionDropRequest { fast_path: false }),
|
||||
)
|
||||
.await
|
||||
.unwrap_err();
|
||||
|
||||
@@ -86,7 +89,10 @@ async fn test_engine_drop_region() {
|
||||
|
||||
// drop the created region.
|
||||
engine
|
||||
.handle_request(region_id, RegionRequest::Drop(RegionDropRequest {}))
|
||||
.handle_request(
|
||||
region_id,
|
||||
RegionRequest::Drop(RegionDropRequest { fast_path: false }),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(!engine.is_region_exists(region_id));
|
||||
@@ -192,7 +198,10 @@ async fn test_engine_drop_region_for_custom_store() {
|
||||
|
||||
// Drop the custom region.
|
||||
engine
|
||||
.handle_request(custom_region_id, RegionRequest::Drop(RegionDropRequest {}))
|
||||
.handle_request(
|
||||
custom_region_id,
|
||||
RegionRequest::Drop(RegionDropRequest { fast_path: false }),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(!engine.is_region_exists(custom_region_id));
|
||||
|
||||
@@ -35,8 +35,8 @@ use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataRef};
|
||||
use store_api::region_engine::{SetRegionRoleStateResponse, SettableRegionRoleState};
|
||||
use store_api::region_request::{
|
||||
AffectedRows, RegionAlterRequest, RegionCatchupRequest, RegionCloseRequest,
|
||||
RegionCompactRequest, RegionCreateRequest, RegionDropRequest, RegionFlushRequest,
|
||||
RegionOpenRequest, RegionRequest, RegionTruncateRequest,
|
||||
RegionCompactRequest, RegionCreateRequest, RegionFlushRequest, RegionOpenRequest,
|
||||
RegionRequest, RegionTruncateRequest,
|
||||
};
|
||||
use store_api::storage::{RegionId, SequenceNumber};
|
||||
use tokio::sync::oneshot::{self, Receiver, Sender};
|
||||
@@ -624,10 +624,10 @@ impl WorkerRequest {
|
||||
sender: sender.into(),
|
||||
request: DdlRequest::Create(v),
|
||||
}),
|
||||
RegionRequest::Drop(v) => WorkerRequest::Ddl(SenderDdlRequest {
|
||||
RegionRequest::Drop(_) => WorkerRequest::Ddl(SenderDdlRequest {
|
||||
region_id,
|
||||
sender: sender.into(),
|
||||
request: DdlRequest::Drop(v),
|
||||
request: DdlRequest::Drop,
|
||||
}),
|
||||
RegionRequest::Open(v) => WorkerRequest::Ddl(SenderDdlRequest {
|
||||
region_id,
|
||||
@@ -690,7 +690,7 @@ impl WorkerRequest {
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum DdlRequest {
|
||||
Create(RegionCreateRequest),
|
||||
Drop(RegionDropRequest),
|
||||
Drop,
|
||||
Open((RegionOpenRequest, Option<WalEntryReceiver>)),
|
||||
Close(RegionCloseRequest),
|
||||
Alter(RegionAlterRequest),
|
||||
|
||||
@@ -836,7 +836,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
|
||||
for ddl in ddl_requests.drain(..) {
|
||||
let res = match ddl.request {
|
||||
DdlRequest::Create(req) => self.handle_create_request(ddl.region_id, req).await,
|
||||
DdlRequest::Drop(_) => self.handle_drop_request(ddl.region_id).await,
|
||||
DdlRequest::Drop => self.handle_drop_request(ddl.region_id).await,
|
||||
DdlRequest::Open((req, wal_entry_receiver)) => {
|
||||
self.handle_open_request(ddl.region_id, req, wal_entry_receiver, ddl.sender)
|
||||
.await;
|
||||
|
||||
@@ -68,6 +68,7 @@ pub struct Inserter {
|
||||
catalog_manager: CatalogManagerRef,
|
||||
partition_manager: PartitionRuleManagerRef,
|
||||
node_manager: NodeManagerRef,
|
||||
#[allow(unused)]
|
||||
table_flownode_set_cache: TableFlownodeSetCacheRef,
|
||||
}
|
||||
|
||||
@@ -338,6 +339,8 @@ impl Inserter {
|
||||
instant_requests,
|
||||
} = requests;
|
||||
|
||||
// TODO(discord9): mirror some
|
||||
|
||||
// Mirror requests for source table to flownode asynchronously
|
||||
let flow_mirror_task = FlowMirrorTask::new(
|
||||
&self.table_flownode_set_cache,
|
||||
@@ -817,12 +820,14 @@ struct CreateAlterTableResult {
|
||||
table_infos: HashMap<TableId, Arc<TableInfo>>,
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
struct FlowMirrorTask {
|
||||
requests: HashMap<Peer, RegionInsertRequests>,
|
||||
num_rows: usize,
|
||||
}
|
||||
|
||||
impl FlowMirrorTask {
|
||||
#[allow(unused)]
|
||||
async fn new(
|
||||
cache: &TableFlownodeSetCacheRef,
|
||||
requests: impl Iterator<Item = &RegionInsertRequest>,
|
||||
@@ -896,6 +901,7 @@ impl FlowMirrorTask {
|
||||
})
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
fn detach(self, node_manager: NodeManagerRef) -> Result<()> {
|
||||
crate::metrics::DIST_MIRROR_PENDING_ROW_COUNT.add(self.num_rows as i64);
|
||||
for (peer, inserts) in self.requests {
|
||||
|
||||
@@ -583,7 +583,8 @@ impl HistogramFoldStream {
|
||||
.expect("field column should not be nullable");
|
||||
counters.push(counter);
|
||||
}
|
||||
let result = Self::evaluate_row(self.quantile, &bucket, &counters)?;
|
||||
// ignore invalid data
|
||||
let result = Self::evaluate_row(self.quantile, &bucket, &counters).unwrap_or(f64::NAN);
|
||||
self.output_buffer[self.field_column_index].push_value_ref(ValueRef::from(result));
|
||||
cursor += bucket_num;
|
||||
remaining_rows -= bucket_num;
|
||||
@@ -672,7 +673,7 @@ impl HistogramFoldStream {
|
||||
if bucket.len() <= 1 {
|
||||
return Ok(f64::NAN);
|
||||
}
|
||||
if *bucket.last().unwrap() != f64::INFINITY {
|
||||
if bucket.last().unwrap().is_finite() {
|
||||
return Err(DataFusionError::Execution(
|
||||
"last bucket should be +Inf".to_string(),
|
||||
));
|
||||
@@ -692,8 +693,8 @@ impl HistogramFoldStream {
|
||||
}
|
||||
|
||||
// check input value
|
||||
debug_assert!(bucket.windows(2).all(|w| w[0] <= w[1]));
|
||||
debug_assert!(counter.windows(2).all(|w| w[0] <= w[1]));
|
||||
debug_assert!(bucket.windows(2).all(|w| w[0] <= w[1]), "{bucket:?}");
|
||||
debug_assert!(counter.windows(2).all(|w| w[0] <= w[1]), "{counter:?}");
|
||||
|
||||
let total = *counter.last().unwrap();
|
||||
let expected_pos = total * quantile;
|
||||
|
||||
@@ -21,6 +21,7 @@ mod idelta;
|
||||
mod predict_linear;
|
||||
mod quantile;
|
||||
mod resets;
|
||||
mod round;
|
||||
#[cfg(test)]
|
||||
mod test_util;
|
||||
|
||||
@@ -39,6 +40,7 @@ pub use idelta::IDelta;
|
||||
pub use predict_linear::PredictLinear;
|
||||
pub use quantile::QuantileOverTime;
|
||||
pub use resets::Resets;
|
||||
pub use round::Round;
|
||||
|
||||
pub(crate) fn extract_array(columnar_value: &ColumnarValue) -> Result<ArrayRef, DataFusionError> {
|
||||
if let ColumnarValue::Array(array) = columnar_value {
|
||||
|
||||
105 src/promql/src/functions/round.rs Normal file
@@ -0,0 +1,105 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use datafusion::error::DataFusionError;
|
||||
use datafusion_expr::{create_udf, ColumnarValue, ScalarUDF, Volatility};
|
||||
use datatypes::arrow::array::AsArray;
|
||||
use datatypes::arrow::datatypes::{DataType, Float64Type};
|
||||
use datatypes::compute;
|
||||
|
||||
use crate::functions::extract_array;
|
||||
|
||||
pub struct Round {
|
||||
nearest: f64,
|
||||
}
|
||||
|
||||
impl Round {
|
||||
fn new(nearest: f64) -> Self {
|
||||
Self { nearest }
|
||||
}
|
||||
|
||||
pub const fn name() -> &'static str {
|
||||
"prom_round"
|
||||
}
|
||||
|
||||
fn input_type() -> Vec<DataType> {
|
||||
vec![DataType::Float64]
|
||||
}
|
||||
|
||||
pub fn return_type() -> DataType {
|
||||
DataType::Float64
|
||||
}
|
||||
|
||||
pub fn scalar_udf(nearest: f64) -> ScalarUDF {
|
||||
create_udf(
|
||||
Self::name(),
|
||||
Self::input_type(),
|
||||
Self::return_type(),
|
||||
Volatility::Immutable,
|
||||
Arc::new(move |input: &_| Self::new(nearest).calc(input)) as _,
|
||||
)
|
||||
}
|
||||
|
||||
fn calc(&self, input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
|
||||
assert_eq!(input.len(), 1);
|
||||
|
||||
let value_array = extract_array(&input[0])?;
|
||||
|
||||
if self.nearest == 0.0 {
|
||||
let values = value_array.as_primitive::<Float64Type>();
|
||||
let result = compute::unary::<_, _, Float64Type>(values, |a| a.round());
|
||||
Ok(ColumnarValue::Array(Arc::new(result) as _))
|
||||
} else {
|
||||
let values = value_array.as_primitive::<Float64Type>();
|
||||
let nearest = self.nearest;
|
||||
let result =
|
||||
compute::unary::<_, _, Float64Type>(values, |a| ((a / nearest).round() * nearest));
|
||||
Ok(ColumnarValue::Array(Arc::new(result) as _))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use datatypes::arrow::array::Float64Array;
|
||||
|
||||
use super::*;
|
||||
|
||||
fn test_round_f64(value: Vec<f64>, nearest: f64, expected: Vec<f64>) {
|
||||
let round_udf = Round::scalar_udf(nearest);
|
||||
let input = vec![ColumnarValue::Array(Arc::new(Float64Array::from(value)))];
|
||||
let result = round_udf.invoke_batch(&input, 1).unwrap();
|
||||
let result_array = extract_array(&result).unwrap();
|
||||
assert_eq!(result_array.len(), 1);
|
||||
assert_eq!(
|
||||
result_array.as_primitive::<Float64Type>().values(),
|
||||
&expected
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_round() {
|
||||
test_round_f64(vec![123.456], 0.001, vec![123.456]);
|
||||
test_round_f64(vec![123.456], 0.01, vec![123.46000000000001]);
|
||||
test_round_f64(vec![123.456], 0.1, vec![123.5]);
|
||||
test_round_f64(vec![123.456], 0.0, vec![123.0]);
|
||||
test_round_f64(vec![123.456], 1.0, vec![123.0]);
|
||||
test_round_f64(vec![123.456], 10.0, vec![120.0]);
|
||||
test_round_f64(vec![123.456], 100.0, vec![100.0]);
|
||||
test_round_f64(vec![123.456], 105.0, vec![105.0]);
|
||||
test_round_f64(vec![123.456], 1000.0, vec![0.0]);
|
||||
}
|
||||
}
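A note on the `0.01` case asserted above: the rounding is done in binary floating point, and 123.46 has no exact f64 representation, which is why the expected value is 123.46000000000001. The core arithmetic is just:

fn round_to_nearest(a: f64, nearest: f64) -> f64 {
    (a / nearest).round() * nearest
}

// round_to_nearest(123.456, 0.01) == 123.46000000000001
// round_to_nearest(123.456, 10.0) == 120.0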
|
||||
@@ -18,7 +18,9 @@ use std::sync::Arc;
|
||||
|
||||
use arrow_schema::DataType;
|
||||
use catalog::table_source::DfTableSourceProvider;
|
||||
use common_function::aggr::{UddSketchState, UDDSKETCH_STATE_NAME};
|
||||
use common_function::aggr::{
|
||||
HllState, UddSketchState, HLL_MERGE_NAME, HLL_NAME, UDDSKETCH_STATE_NAME,
|
||||
};
|
||||
use common_function::scalars::udf::create_udf;
|
||||
use common_query::logical_plan::create_aggregate_function;
|
||||
use datafusion::common::TableReference;
|
||||
@@ -169,6 +171,12 @@ impl ContextProvider for DfContextProviderAdapter {
|
||||
if name == UDDSKETCH_STATE_NAME {
|
||||
return Some(Arc::new(UddSketchState::udf_impl()));
|
||||
}
|
||||
if name == HLL_NAME {
|
||||
return Some(Arc::new(HllState::state_udf_impl()));
|
||||
}
|
||||
if name == HLL_MERGE_NAME {
|
||||
return Some(Arc::new(HllState::merge_udf_impl()));
|
||||
}
|
||||
|
||||
self.engine_state.aggregate_function(name).map_or_else(
|
||||
|| self.session_state.aggregate_functions().get(name).cloned(),
|
||||
|
||||
@@ -52,7 +52,7 @@ use promql::extension_plan::{
|
||||
use promql::functions::{
|
||||
AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, Deriv, HoltWinters, IDelta,
|
||||
Increase, LastOverTime, MaxOverTime, MinOverTime, PredictLinear, PresentOverTime,
|
||||
QuantileOverTime, Rate, Resets, StddevOverTime, StdvarOverTime, SumOverTime,
|
||||
QuantileOverTime, Rate, Resets, Round, StddevOverTime, StdvarOverTime, SumOverTime,
|
||||
};
|
||||
use promql_parser::label::{MatchOp, Matcher, Matchers, METRIC_NAME};
|
||||
use promql_parser::parser::token::TokenType;
|
||||
@@ -200,10 +200,9 @@ impl PromPlanner {
|
||||
PromExpr::Paren(ParenExpr { expr }) => {
|
||||
self.prom_expr_to_plan(expr, session_state).await?
|
||||
}
|
||||
PromExpr::Subquery(SubqueryExpr { .. }) => UnsupportedExprSnafu {
|
||||
name: "Prom Subquery",
|
||||
PromExpr::Subquery(expr) => {
|
||||
self.prom_subquery_expr_to_plan(session_state, expr).await?
|
||||
}
|
||||
.fail()?,
|
||||
PromExpr::NumberLiteral(lit) => self.prom_number_lit_to_plan(lit)?,
|
||||
PromExpr::StringLiteral(lit) => self.prom_string_lit_to_plan(lit)?,
|
||||
PromExpr::VectorSelector(selector) => {
|
||||
@@ -218,6 +217,48 @@ impl PromPlanner {
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
async fn prom_subquery_expr_to_plan(
|
||||
&mut self,
|
||||
session_state: &SessionState,
|
||||
subquery_expr: &SubqueryExpr,
|
||||
) -> Result<LogicalPlan> {
|
||||
let SubqueryExpr {
|
||||
expr, range, step, ..
|
||||
} = subquery_expr;
|
||||
|
||||
let current_interval = self.ctx.interval;
|
||||
if let Some(step) = step {
|
||||
self.ctx.interval = step.as_millis() as _;
|
||||
}
|
||||
let current_start = self.ctx.start;
|
||||
self.ctx.start -= range.as_millis() as i64 - self.ctx.interval;
|
||||
let input = self.prom_expr_to_plan(expr, session_state).await?;
|
||||
self.ctx.interval = current_interval;
|
||||
self.ctx.start = current_start;
|
||||
|
||||
ensure!(!range.is_zero(), ZeroRangeSelectorSnafu);
|
||||
let range_ms = range.as_millis() as _;
|
||||
self.ctx.range = Some(range_ms);
|
||||
|
||||
let manipulate = RangeManipulate::new(
|
||||
self.ctx.start,
|
||||
self.ctx.end,
|
||||
self.ctx.interval,
|
||||
range_ms,
|
||||
self.ctx
|
||||
.time_index_column
|
||||
.clone()
|
||||
.expect("time index should be set in `setup_context`"),
|
||||
self.ctx.field_columns.clone(),
|
||||
input,
|
||||
)
|
||||
.context(DataFusionPlanningSnafu)?;
|
||||
|
||||
Ok(LogicalPlan::Extension(Extension {
|
||||
node: Arc::new(manipulate),
|
||||
}))
|
||||
}
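The bound adjustment above can be read as simple millisecond arithmetic; a sketch with illustrative numbers, not tied to the planner types:

/// The evaluation start is pulled back by `range - interval` so the first output
/// point already has a full range of input; `step`, if present, overrides the
/// outer interval.
fn subquery_bounds(start: i64, interval: i64, range: i64, step: Option<i64>) -> (i64, i64) {
    let interval = step.unwrap_or(interval);
    (start - (range - interval), interval)
}

// subquery_bounds(0, 5_000, 30_000, Some(1_000)) == (-29_000, 1_000)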
|
||||
|
||||
async fn prom_aggr_expr_to_plan(
|
||||
&mut self,
|
||||
session_state: &SessionState,
|
||||
@@ -441,6 +482,7 @@ impl PromPlanner {
|
||||
// if the left or right plan has no tag columns (e.g. `scalar(...) + host` or `host + scalar(...)`),
// we only join on the time index in this case
|
||||
left_context.tag_columns.is_empty() || right_context.tag_columns.is_empty(),
|
||||
modifier,
|
||||
)?;
|
||||
let join_plan_schema = join_plan.schema().clone();
|
||||
|
||||
@@ -1468,6 +1510,20 @@ impl PromPlanner {
|
||||
|
||||
ScalarFunc::GeneratedExpr
|
||||
}
|
||||
"round" => {
|
||||
let nearest = match other_input_exprs.pop_front() {
|
||||
Some(DfExpr::Literal(ScalarValue::Float64(Some(t)))) => t,
|
||||
Some(DfExpr::Literal(ScalarValue::Int64(Some(t)))) => t as f64,
|
||||
None => 0.0,
|
||||
other => UnexpectedPlanExprSnafu {
|
||||
desc: format!("expected f64 literal as t, but found {:?}", other),
|
||||
}
|
||||
.fail()?,
|
||||
};
|
||||
|
||||
ScalarFunc::DataFusionUdf(Arc::new(Round::scalar_udf(nearest)))
|
||||
}
|
||||
|
||||
_ => {
|
||||
if let Some(f) = session_state.scalar_functions().get(func.name) {
|
||||
ScalarFunc::DataFusionBuiltin(f.clone())
|
||||
@@ -1674,7 +1730,7 @@ impl PromPlanner {
|
||||
ensure!(
|
||||
!src_labels.is_empty(),
|
||||
FunctionInvalidArgumentSnafu {
|
||||
fn_name: "label_join",
|
||||
fn_name: "label_join"
|
||||
}
|
||||
);
|
||||
|
||||
@@ -2121,24 +2177,49 @@ impl PromPlanner {
|
||||
left_time_index_column: Option<String>,
|
||||
right_time_index_column: Option<String>,
|
||||
only_join_time_index: bool,
|
||||
modifier: &Option<BinModifier>,
|
||||
) -> Result<LogicalPlan> {
|
||||
let mut left_tag_columns = if only_join_time_index {
|
||||
vec![]
|
||||
BTreeSet::new()
|
||||
} else {
|
||||
self.ctx
|
||||
.tag_columns
|
||||
.iter()
|
||||
.map(Column::from_name)
|
||||
.collect::<Vec<_>>()
|
||||
.cloned()
|
||||
.collect::<BTreeSet<_>>()
|
||||
};
|
||||
let mut right_tag_columns = left_tag_columns.clone();
|
||||
|
||||
// apply modifier
|
||||
if let Some(modifier) = modifier {
|
||||
// apply label modifier
|
||||
if let Some(matching) = &modifier.matching {
|
||||
match matching {
|
||||
// keeps columns mentioned in `on`
|
||||
LabelModifier::Include(on) => {
|
||||
let mask = on.labels.iter().cloned().collect::<BTreeSet<_>>();
|
||||
left_tag_columns = left_tag_columns.intersection(&mask).cloned().collect();
|
||||
right_tag_columns =
|
||||
right_tag_columns.intersection(&mask).cloned().collect();
|
||||
}
|
||||
// removes columns mentioned in `ignoring`
|
||||
LabelModifier::Exclude(ignoring) => {
|
||||
// doesn't check existence of label
|
||||
for label in &ignoring.labels {
|
||||
let _ = left_tag_columns.remove(label);
|
||||
let _ = right_tag_columns.remove(label);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// push time index column if it exists
|
||||
if let (Some(left_time_index_column), Some(right_time_index_column)) =
|
||||
(left_time_index_column, right_time_index_column)
|
||||
{
|
||||
left_tag_columns.push(Column::from_name(left_time_index_column));
|
||||
right_tag_columns.push(Column::from_name(right_time_index_column));
|
||||
left_tag_columns.insert(left_time_index_column);
|
||||
right_tag_columns.insert(right_time_index_column);
|
||||
}
|
||||
|
||||
let right = LogicalPlanBuilder::from(right)
|
||||
@@ -2154,7 +2235,16 @@ impl PromPlanner {
|
||||
.join(
|
||||
right,
|
||||
JoinType::Inner,
|
||||
(left_tag_columns, right_tag_columns),
|
||||
(
|
||||
left_tag_columns
|
||||
.into_iter()
|
||||
.map(Column::from_name)
|
||||
.collect::<Vec<_>>(),
|
||||
right_tag_columns
|
||||
.into_iter()
|
||||
.map(Column::from_name)
|
||||
.collect::<Vec<_>>(),
|
||||
),
|
||||
None,
|
||||
)
|
||||
.context(DataFusionPlanningSnafu)?
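The `on(...)` / `ignoring(...)` handling above is plain set algebra over the tag columns; a standalone sketch with `String` labels, independent of the planner's `Column` type:

use std::collections::BTreeSet;

fn apply_matching(
    tags: BTreeSet<String>,
    on: Option<&[&str]>,
    ignoring: Option<&[&str]>,
) -> BTreeSet<String> {
    match (on, ignoring) {
        // `on(...)` keeps only the listed labels
        (Some(on), _) => {
            let mask: BTreeSet<String> = on.iter().map(|s| s.to_string()).collect();
            tags.intersection(&mask).cloned().collect()
        }
        // `ignoring(...)` drops the listed labels
        (None, Some(ignoring)) => {
            let mut tags = tags;
            for label in ignoring {
                tags.remove(*label);
            }
            tags
        }
        (None, None) => tags,
    }
}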
|
||||
@@ -3340,6 +3430,59 @@ mod test {
|
||||
indie_query_plan_compare(query, expected).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_hash_join() {
|
||||
let mut eval_stmt = EvalStmt {
|
||||
expr: PromExpr::NumberLiteral(NumberLiteral { val: 1.0 }),
|
||||
start: UNIX_EPOCH,
|
||||
end: UNIX_EPOCH
|
||||
.checked_add(Duration::from_secs(100_000))
|
||||
.unwrap(),
|
||||
interval: Duration::from_secs(5),
|
||||
lookback_delta: Duration::from_secs(1),
|
||||
};
|
||||
|
||||
let case = r#"http_server_requests_seconds_sum{uri="/accounts/login"} / ignoring(kubernetes_pod_name,kubernetes_namespace) http_server_requests_seconds_count{uri="/accounts/login"}"#;
|
||||
|
||||
let prom_expr = parser::parse(case).unwrap();
|
||||
eval_stmt.expr = prom_expr;
|
||||
let table_provider = build_test_table_provider_with_fields(
|
||||
&[
|
||||
(
|
||||
DEFAULT_SCHEMA_NAME.to_string(),
|
||||
"http_server_requests_seconds_sum".to_string(),
|
||||
),
|
||||
(
|
||||
DEFAULT_SCHEMA_NAME.to_string(),
|
||||
"http_server_requests_seconds_count".to_string(),
|
||||
),
|
||||
],
|
||||
&["uri", "kubernetes_namespace", "kubernetes_pod_name"],
|
||||
)
|
||||
.await;
|
||||
// Should be ok
|
||||
let plan = PromPlanner::stmt_to_plan(table_provider, &eval_stmt, &build_session_state())
|
||||
.await
|
||||
.unwrap();
|
||||
let expected = r#"Projection: http_server_requests_seconds_count.uri, http_server_requests_seconds_count.kubernetes_namespace, http_server_requests_seconds_count.kubernetes_pod_name, http_server_requests_seconds_count.greptime_timestamp, http_server_requests_seconds_sum.greptime_value / http_server_requests_seconds_count.greptime_value AS http_server_requests_seconds_sum.greptime_value / http_server_requests_seconds_count.greptime_value
|
||||
Inner Join: http_server_requests_seconds_sum.greptime_timestamp = http_server_requests_seconds_count.greptime_timestamp, http_server_requests_seconds_sum.uri = http_server_requests_seconds_count.uri
|
||||
SubqueryAlias: http_server_requests_seconds_sum
|
||||
PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp]
|
||||
PromSeriesNormalize: offset=[0], time index=[greptime_timestamp], filter NaN: [false]
|
||||
PromSeriesDivide: tags=["uri", "kubernetes_namespace", "kubernetes_pod_name"]
|
||||
Sort: http_server_requests_seconds_sum.uri DESC NULLS LAST, http_server_requests_seconds_sum.kubernetes_namespace DESC NULLS LAST, http_server_requests_seconds_sum.kubernetes_pod_name DESC NULLS LAST, http_server_requests_seconds_sum.greptime_timestamp DESC NULLS LAST
|
||||
Filter: http_server_requests_seconds_sum.uri = Utf8("/accounts/login") AND http_server_requests_seconds_sum.greptime_timestamp >= TimestampMillisecond(-1000, None) AND http_server_requests_seconds_sum.greptime_timestamp <= TimestampMillisecond(100001000, None)
|
||||
TableScan: http_server_requests_seconds_sum
|
||||
SubqueryAlias: http_server_requests_seconds_count
|
||||
PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp]
|
||||
PromSeriesNormalize: offset=[0], time index=[greptime_timestamp], filter NaN: [false]
|
||||
PromSeriesDivide: tags=["uri", "kubernetes_namespace", "kubernetes_pod_name"]
|
||||
Sort: http_server_requests_seconds_count.uri DESC NULLS LAST, http_server_requests_seconds_count.kubernetes_namespace DESC NULLS LAST, http_server_requests_seconds_count.kubernetes_pod_name DESC NULLS LAST, http_server_requests_seconds_count.greptime_timestamp DESC NULLS LAST
|
||||
Filter: http_server_requests_seconds_count.uri = Utf8("/accounts/login") AND http_server_requests_seconds_count.greptime_timestamp >= TimestampMillisecond(-1000, None) AND http_server_requests_seconds_count.greptime_timestamp <= TimestampMillisecond(100001000, None)
|
||||
TableScan: http_server_requests_seconds_count"#;
|
||||
assert_eq!(plan.to_string(), expected);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_nested_histogram_quantile() {
|
||||
let mut eval_stmt = EvalStmt {
|
||||
|
@@ -15,7 +15,7 @@
use std::sync::Arc;

use common_error::ext::BoxedError;
use common_function::aggr::UddSketchState;
use common_function::aggr::{HllState, UddSketchState};
use common_function::function_registry::FUNCTION_REGISTRY;
use common_function::scalars::udf::create_udf;
use common_query::error::RegisterUdfSnafu;
@@ -127,6 +127,8 @@ impl SubstraitPlanDecoder for DefaultPlanDecoder {
.register_udf(udf)
.context(RegisterUdfSnafu { name: func.name() })?;
let _ = session_state.register_udaf(Arc::new(UddSketchState::udf_impl()));
let _ = session_state.register_udaf(Arc::new(HllState::state_udf_impl()));
let _ = session_state.register_udaf(Arc::new(HllState::merge_udf_impl()));
}
let logical_plan = DFLogicalSubstraitConvertor
.decode(message, session_state)

@@ -327,7 +327,7 @@ CREATE TABLE IF NOT EXISTS "system_metrics" (
"host" STRING NULL INVERTED INDEX,
"cpu" DOUBLE NULL,
"disk" FLOAT NULL,
"msg" STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'false'),
"msg" STRING NULL FULLTEXT INDEX WITH(analyzer = 'English', case_sensitive = 'false'),
"ts" TIMESTAMP(3) NOT NULL DEFAULT current_timestamp(),
TIME INDEX ("ts"),
PRIMARY KEY ("id", "host")

@@ -65,6 +65,7 @@ http-body = "1"
humantime.workspace = true
humantime-serde.workspace = true
hyper = { workspace = true, features = ["full"] }
indexmap = "2.7"
influxdb_line_protocol = { git = "https://github.com/evenyag/influxdb_iox", branch = "feat/line-protocol" }
itertools.workspace = true
jsonb.workspace = true

@@ -1 +1 @@
v0.7.10
v0.7.11

@@ -13,7 +13,7 @@
// limitations under the License.

//! prom supply the prometheus HTTP API Server compliance
use std::collections::{BTreeMap, HashMap};
use std::collections::HashMap;

use axum::http::HeaderValue;
use axum::response::{IntoResponse, Response};
@@ -25,6 +25,7 @@ use common_recordbatch::RecordBatches;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVector;
use datatypes::vectors::{Float64Vector, StringVector, TimestampMillisecondVector};
use indexmap::IndexMap;
use promql_parser::label::METRIC_NAME;
use promql_parser::parser::value::ValueType;
use serde::{Deserialize, Serialize};
@@ -229,7 +230,9 @@ impl PrometheusJsonResponse {
})?;

let metric_name = (METRIC_NAME, metric_name.as_str());
let mut buffer = BTreeMap::<Vec<(&str, &str)>, Vec<(f64, String)>>::new();
// Preserves the order of output tags.
// Tag order matters, e.g., after sort and sort_desc, the output order must be kept.
let mut buffer = IndexMap::<Vec<(&str, &str)>, Vec<(f64, String)>>::new();

let schema = batches.schema();
for batch in batches.iter() {

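The switch from BTreeMap to IndexMap above is what keeps the tag order stable: BTreeMap iterates in key-sorted order, while IndexMap iterates in insertion order. A minimal standalone sketch of that difference (illustrative only, not part of the diff; it uses the same indexmap crate the change adds):

    use indexmap::IndexMap;
    use std::collections::BTreeMap;

    fn main() {
        let pairs = [("uri", "/b"), ("instance", "a"), ("job", "api")];

        // BTreeMap re-sorts by key, losing the original tag order.
        let sorted: BTreeMap<_, _> = pairs.iter().copied().collect();
        // IndexMap keeps the order in which the tags were inserted.
        let ordered: IndexMap<_, _> = pairs.iter().copied().collect();

        assert_eq!(sorted.keys().copied().collect::<Vec<_>>(), ["instance", "job", "uri"]);
        assert_eq!(ordered.keys().copied().collect::<Vec<_>>(), ["uri", "instance", "job"]);
    }
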
@@ -156,9 +156,9 @@ impl Display for Column {
if let Some(fulltext_options) = &self.extensions.fulltext_index_options {
if !fulltext_options.is_empty() {
let options = fulltext_options.kv_pairs();
write!(f, " FULLTEXT WITH({})", format_list_comma!(options))?;
write!(f, " FULLTEXT INDEX WITH({})", format_list_comma!(options))?;
} else {
write!(f, " FULLTEXT")?;
write!(f, " FULLTEXT INDEX")?;
}
}

@@ -222,7 +222,12 @@ fn make_region_creates(creates: CreateRequests) -> Result<Vec<(RegionId, RegionR

fn parse_region_drop(drop: DropRequest) -> Result<(RegionId, RegionDropRequest)> {
let region_id = drop.region_id.into();
Ok((region_id, RegionDropRequest {}))
Ok((
region_id,
RegionDropRequest {
fast_path: drop.fast_path,
},
))
}

fn make_region_drop(drop: DropRequest) -> Result<Vec<(RegionId, RegionRequest)>> {
@@ -397,8 +402,10 @@ impl RegionCreateRequest {
}
}

#[derive(Debug, Clone, Default)]
pub struct RegionDropRequest {}
#[derive(Debug, Clone)]
pub struct RegionDropRequest {
pub fast_path: bool,
}

/// Open region request.
#[derive(Debug, Clone)]

@@ -40,7 +40,7 @@ use common_procedure::options::ProcedureConfig;
use common_procedure::ProcedureManagerRef;
use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig};
use datanode::datanode::DatanodeBuilder;
use flow::FlownodeBuilder;
use flow::{FlownodeBuilder, FrontendClient};
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance, StandaloneDatanodeManager};
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
@@ -164,12 +164,15 @@ impl GreptimeDbStandaloneBuilder {
Some(procedure_manager.clone()),
);

let fe_server_addr = opts.frontend_options().grpc.bind_addr.clone();
let frontend_client = FrontendClient::from_static_grpc_addr(fe_server_addr);
let flow_builder = FlownodeBuilder::new(
Default::default(),
plugins.clone(),
table_metadata_manager.clone(),
catalog_manager.clone(),
flow_metadata_manager.clone(),
Arc::new(frontend_client),
);
let flownode = Arc::new(flow_builder.build().await.unwrap());

84 tests/cases/standalone/common/aggregate/hll.result Normal file
@@ -0,0 +1,84 @@
CREATE TABLE test_hll (
`id` INT PRIMARY KEY,
`value` STRING,
`ts` timestamp time index default now()
);

Affected Rows: 0

INSERT INTO test_hll (`id`, `value`) VALUES
(1, "a"),
(2, "b"),
(5, "e"),
(6, "f"),
(7, "g"),
(8, "h"),
(9, "i"),
(10, "j"),
(11, "i"),
(12, "j"),
(13, "i"),
(14, "n"),
(15, "o");

Affected Rows: 13

select hll_count(hll(`value`)) from test_hll;

+--------------------------------+
| hll_count(hll(test_hll.value)) |
+--------------------------------+
| 10 |
+--------------------------------+

INSERT INTO test_hll (`id`, `value`) VALUES
(16, "b"),
(17, "i"),
(18, "j"),
(19, "s"),
(20, "t");

Affected Rows: 5

select hll_count(hll(`value`)) from test_hll;

+--------------------------------+
| hll_count(hll(test_hll.value)) |
+--------------------------------+
| 12 |
+--------------------------------+

create table test_hll_merge (
`id` INT PRIMARY KEY,
`state` BINARY,
`ts` timestamp time index default now()
);

Affected Rows: 0

insert into test_hll_merge (`id`, `state`)
select 1, hll(`value`) from test_hll;

Affected Rows: 1

insert into test_hll_merge (`id`, `state`)
select 2, hll(`value`) from test_hll;

Affected Rows: 1

select hll_count(hll_merge(`state`)) from test_hll_merge;

+--------------------------------------------+
| hll_count(hll_merge(test_hll_merge.state)) |
+--------------------------------------------+
| 12 |
+--------------------------------------------+

drop table test_hll;

Affected Rows: 0

drop table test_hll_merge;

Affected Rows: 0

49 tests/cases/standalone/common/aggregate/hll.sql Normal file
@@ -0,0 +1,49 @@
CREATE TABLE test_hll (
`id` INT PRIMARY KEY,
`value` STRING,
`ts` timestamp time index default now()
);

INSERT INTO test_hll (`id`, `value`) VALUES
(1, "a"),
(2, "b"),
(5, "e"),
(6, "f"),
(7, "g"),
(8, "h"),
(9, "i"),
(10, "j"),
(11, "i"),
(12, "j"),
(13, "i"),
(14, "n"),
(15, "o");

select hll_count(hll(`value`)) from test_hll;

INSERT INTO test_hll (`id`, `value`) VALUES
(16, "b"),
(17, "i"),
(18, "j"),
(19, "s"),
(20, "t");

select hll_count(hll(`value`)) from test_hll;

create table test_hll_merge (
`id` INT PRIMARY KEY,
`state` BINARY,
`ts` timestamp time index default now()
);

insert into test_hll_merge (`id`, `state`)
select 1, hll(`value`) from test_hll;

insert into test_hll_merge (`id`, `state`)
select 2, hll(`value`) from test_hll;

select hll_count(hll_merge(`state`)) from test_hll_merge;

drop table test_hll;

drop table test_hll_merge;
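For context on what hll(), hll_merge() and hll_count() estimate: a HyperLogLog sketch gives an approximate distinct count from a compact state that can be merged across groups. A hedged standalone sketch of the same idea using the hyperloglogplus crate (that crate appears in the lockfile for common-function; treating it as the backing implementation of hll() is an assumption):

    use std::collections::hash_map::RandomState;
    use hyperloglogplus::{HyperLogLog, HyperLogLogPlus};

    fn main() {
        // Precision 14 is a common default; higher precision means lower error but larger state.
        let mut sketch: HyperLogLogPlus<&str, _> =
            HyperLogLogPlus::new(14, RandomState::new()).unwrap();

        // Same values as the first INSERT above: 10 distinct strings.
        for v in ["a", "b", "e", "f", "g", "h", "i", "j", "i", "j", "i", "n", "o"] {
            sketch.insert(&v);
        }

        // Prints an estimate close to 10, matching the first hll_count result above.
        println!("estimated distinct values: {}", sketch.count());
    }
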
@@ -79,20 +79,20 @@ SELECT * FROM test WHERE MATCHES(message, 'hello') ORDER BY message;
-- SQLNESS ARG restart=true
SHOW CREATE TABLE test;

+-------+---------------------------------------------------------------------------------------+
| Table | Create Table |
+-------+---------------------------------------------------------------------------------------+
| test | CREATE TABLE IF NOT EXISTS "test" ( |
| | "message" STRING NULL FULLTEXT WITH(analyzer = 'Chinese', case_sensitive = 'true'), |
| | "time" TIMESTAMP(3) NOT NULL, |
| | TIME INDEX ("time") |
| | ) |
| | |
| | ENGINE=mito |
| | WITH( |
| | append_mode = 'true' |
| | ) |
+-------+---------------------------------------------------------------------------------------+
+-------+---------------------------------------------------------------------------------------------+
| Table | Create Table |
+-------+---------------------------------------------------------------------------------------------+
| test | CREATE TABLE IF NOT EXISTS "test" ( |
| | "message" STRING NULL FULLTEXT INDEX WITH(analyzer = 'Chinese', case_sensitive = 'true'), |
| | "time" TIMESTAMP(3) NOT NULL, |
| | TIME INDEX ("time") |
| | ) |
| | |
| | ENGINE=mito |
| | WITH( |
| | append_mode = 'true' |
| | ) |
+-------+---------------------------------------------------------------------------------------------+

SHOW INDEX FROM test;

@@ -138,20 +138,20 @@ Affected Rows: 0

SHOW CREATE TABLE test;

+-------+---------------------------------------------------------------------------------------+
| Table | Create Table |
+-------+---------------------------------------------------------------------------------------+
| test | CREATE TABLE IF NOT EXISTS "test" ( |
| | "message" STRING NULL FULLTEXT WITH(analyzer = 'Chinese', case_sensitive = 'true'), |
| | "time" TIMESTAMP(3) NOT NULL, |
| | TIME INDEX ("time") |
| | ) |
| | |
| | ENGINE=mito |
| | WITH( |
| | append_mode = 'true' |
| | ) |
+-------+---------------------------------------------------------------------------------------+
+-------+---------------------------------------------------------------------------------------------+
| Table | Create Table |
+-------+---------------------------------------------------------------------------------------------+
| test | CREATE TABLE IF NOT EXISTS "test" ( |
| | "message" STRING NULL FULLTEXT INDEX WITH(analyzer = 'Chinese', case_sensitive = 'true'), |
| | "time" TIMESTAMP(3) NOT NULL, |
| | TIME INDEX ("time") |
| | ) |
| | |
| | ENGINE=mito |
| | WITH( |
| | append_mode = 'true' |
| | ) |
+-------+---------------------------------------------------------------------------------------------+

SHOW INDEX FROM test;

@@ -7,18 +7,18 @@ Affected Rows: 0

SHOW CREATE TABLE log;

+-------+------------------------------------------------------------------------------------+
| Table | Create Table |
+-------+------------------------------------------------------------------------------------+
| log | CREATE TABLE IF NOT EXISTS "log" ( |
| | "ts" TIMESTAMP(3) NOT NULL, |
| | "msg" STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'false'), |
| | TIME INDEX ("ts") |
| | ) |
| | |
| | ENGINE=mito |
| | |
+-------+------------------------------------------------------------------------------------+
+-------+------------------------------------------------------------------------------------------+
| Table | Create Table |
+-------+------------------------------------------------------------------------------------------+
| log | CREATE TABLE IF NOT EXISTS "log" ( |
| | "ts" TIMESTAMP(3) NOT NULL, |
| | "msg" STRING NULL FULLTEXT INDEX WITH(analyzer = 'English', case_sensitive = 'false'), |
| | TIME INDEX ("ts") |
| | ) |
| | |
| | ENGINE=mito |
| | |
+-------+------------------------------------------------------------------------------------------+

DROP TABLE log;

@@ -33,18 +33,18 @@ Affected Rows: 0

SHOW CREATE TABLE log_with_opts;

+---------------+-----------------------------------------------------------------------------------+
| Table | Create Table |
+---------------+-----------------------------------------------------------------------------------+
| log_with_opts | CREATE TABLE IF NOT EXISTS "log_with_opts" ( |
| | "ts" TIMESTAMP(3) NOT NULL, |
| | "msg" STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'true'), |
| | TIME INDEX ("ts") |
| | ) |
| | |
| | ENGINE=mito |
| | |
+---------------+-----------------------------------------------------------------------------------+
+---------------+-----------------------------------------------------------------------------------------+
| Table | Create Table |
+---------------+-----------------------------------------------------------------------------------------+
| log_with_opts | CREATE TABLE IF NOT EXISTS "log_with_opts" ( |
| | "ts" TIMESTAMP(3) NOT NULL, |
| | "msg" STRING NULL FULLTEXT INDEX WITH(analyzer = 'English', case_sensitive = 'true'), |
| | TIME INDEX ("ts") |
| | ) |
| | |
| | ENGINE=mito |
| | |
+---------------+-----------------------------------------------------------------------------------------+

DROP TABLE log_with_opts;

@@ -60,19 +60,19 @@ Affected Rows: 0

SHOW CREATE TABLE log_multi_fulltext_cols;

+-------------------------+-------------------------------------------------------------------------------------+
| Table | Create Table |
+-------------------------+-------------------------------------------------------------------------------------+
| log_multi_fulltext_cols | CREATE TABLE IF NOT EXISTS "log_multi_fulltext_cols" ( |
| | "ts" TIMESTAMP(3) NOT NULL, |
| | "msg" STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'false'), |
| | "msg2" STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'false'), |
| | TIME INDEX ("ts") |
| | ) |
| | |
| | ENGINE=mito |
| | |
+-------------------------+-------------------------------------------------------------------------------------+
+-------------------------+-------------------------------------------------------------------------------------------+
| Table | Create Table |
+-------------------------+-------------------------------------------------------------------------------------------+
| log_multi_fulltext_cols | CREATE TABLE IF NOT EXISTS "log_multi_fulltext_cols" ( |
| | "ts" TIMESTAMP(3) NOT NULL, |
| | "msg" STRING NULL FULLTEXT INDEX WITH(analyzer = 'English', case_sensitive = 'false'), |
| | "msg2" STRING NULL FULLTEXT INDEX WITH(analyzer = 'English', case_sensitive = 'false'), |
| | TIME INDEX ("ts") |
| | ) |
| | |
| | ENGINE=mito |
| | |
+-------------------------+-------------------------------------------------------------------------------------------+

DROP TABLE log_multi_fulltext_cols;

81 tests/cases/standalone/common/promql/round_fn.result Normal file
@@ -0,0 +1,81 @@
create table cache_hit (
ts timestamp time index,
job string,
greptime_value double,
primary key (job)
);

Affected Rows: 0

insert into cache_hit values
(3000, "read", 123.45),
(3000, "write", 234.567),
(4000, "read", 345.678),
(4000, "write", 456.789);

Affected Rows: 4

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 0.01);

+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 123.45 | read |
| 1970-01-01T00:00:03 | 234.57 | write |
| 1970-01-01T00:00:04 | 345.68 | read |
| 1970-01-01T00:00:04 | 456.79 | write |
+---------------------+----------------------------+-------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 0.1);

+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 123.5 | read |
| 1970-01-01T00:00:03 | 234.60000000000002 | write |
| 1970-01-01T00:00:04 | 345.70000000000005 | read |
| 1970-01-01T00:00:04 | 456.8 | write |
+---------------------+----------------------------+-------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 1.0);

+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 123.0 | read |
| 1970-01-01T00:00:03 | 235.0 | write |
| 1970-01-01T00:00:04 | 346.0 | read |
| 1970-01-01T00:00:04 | 457.0 | write |
+---------------------+----------------------------+-------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit);

+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 123.0 | read |
| 1970-01-01T00:00:03 | 235.0 | write |
| 1970-01-01T00:00:04 | 346.0 | read |
| 1970-01-01T00:00:04 | 457.0 | write |
+---------------------+----------------------------+-------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 10.0);

+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 120.0 | read |
| 1970-01-01T00:00:03 | 230.0 | write |
| 1970-01-01T00:00:04 | 350.0 | read |
| 1970-01-01T00:00:04 | 460.0 | write |
+---------------------+----------------------------+-------+

drop table cache_hit;

Affected Rows: 0

30 tests/cases/standalone/common/promql/round_fn.sql Normal file
@@ -0,0 +1,30 @@

create table cache_hit (
ts timestamp time index,
job string,
greptime_value double,
primary key (job)
);

insert into cache_hit values
(3000, "read", 123.45),
(3000, "write", 234.567),
(4000, "read", 345.678),
(4000, "write", 456.789);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 0.01);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 0.1);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 1.0);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 10.0);

drop table cache_hit;
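The expectations above, including values like 234.60000000000002, follow from rounding to the nearest multiple of the to_nearest argument in f64 arithmetic. A small sketch of that arithmetic (one plausible formulation for illustration; the engine's exact code path may differ):

    // Round `value` to the nearest multiple of `to_nearest`.
    fn round_to_nearest(value: f64, to_nearest: f64) -> f64 {
        (value / to_nearest).round() * to_nearest
    }

    fn main() {
        assert_eq!(round_to_nearest(234.567, 1.0), 235.0);
        assert_eq!(round_to_nearest(456.789, 10.0), 460.0);
        // Fractional to_nearest values go through an inexact multiply on the way back,
        // which is why the expected output above can contain values such as
        // 234.60000000000002 and 345.70000000000005 rather than 234.6 and 345.7.
        println!("{}", round_to_nearest(234.567, 0.01)); // ~234.57
        println!("{}", round_to_nearest(234.567, 0.1)); // ~234.6
    }
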
@@ -638,3 +638,78 @@ drop table cache_miss;

Affected Rows: 0

create table cache_hit_with_null_label (
ts timestamp time index,
job string,
null_label string null,
greptime_value double,
primary key (job, null_label)
);

Affected Rows: 0

create table cache_miss_with_null_label (
ts timestamp time index,
job string,
null_label string null,
greptime_value double,
primary key (job, null_label)
);

Affected Rows: 0

insert into cache_hit_with_null_label values
(3000, "read", null, 1.0),
(3000, "write", null, 2.0),
(4000, "read", null, 3.0),
(4000, "write", null, 4.0);

Affected Rows: 4

insert into cache_miss_with_null_label values
(3000, "read", null, 1.0),
(3000, "write", null, 2.0),
(4000, "read", null, 1.0),
(4000, "write", null, 2.0);

Affected Rows: 4

-- SQLNESS SORT_RESULT 3 1
-- null != null, so it returns the empty set.
tql eval (3, 4, '1s') cache_hit_with_null_label / (cache_miss_with_null_label + cache_hit_with_null_label);

++
++

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') cache_hit_with_null_label / ignoring(null_label) (cache_miss_with_null_label + ignoring(null_label) cache_hit_with_null_label);

+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+
| job | null_label | ts | lhs.greptime_value / rhs.cache_miss_with_null_label.greptime_value + cache_hit_with_null_label.greptime_value |
+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+
| read | | 1970-01-01T00:00:03 | 0.5 |
| read | | 1970-01-01T00:00:04 | 0.75 |
| write | | 1970-01-01T00:00:03 | 0.5 |
| write | | 1970-01-01T00:00:04 | 0.6666666666666666 |
+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') cache_hit_with_null_label / on(job) (cache_miss_with_null_label + on(job) cache_hit_with_null_label);

+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+
| job | null_label | ts | lhs.greptime_value / rhs.cache_miss_with_null_label.greptime_value + cache_hit_with_null_label.greptime_value |
+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+
| read | | 1970-01-01T00:00:03 | 0.5 |
| read | | 1970-01-01T00:00:04 | 0.75 |
| write | | 1970-01-01T00:00:03 | 0.5 |
| write | | 1970-01-01T00:00:04 | 0.6666666666666666 |
+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+

drop table cache_hit_with_null_label;

Affected Rows: 0

drop table cache_miss_with_null_label;

Affected Rows: 0

@@ -295,3 +295,45 @@ tql eval (3, 4, '1s') cache_hit / (cache_miss + cache_hit);
drop table cache_hit;

drop table cache_miss;

create table cache_hit_with_null_label (
ts timestamp time index,
job string,
null_label string null,
greptime_value double,
primary key (job, null_label)
);

create table cache_miss_with_null_label (
ts timestamp time index,
job string,
null_label string null,
greptime_value double,
primary key (job, null_label)
);

insert into cache_hit_with_null_label values
(3000, "read", null, 1.0),
(3000, "write", null, 2.0),
(4000, "read", null, 3.0),
(4000, "write", null, 4.0);

insert into cache_miss_with_null_label values
(3000, "read", null, 1.0),
(3000, "write", null, 2.0),
(4000, "read", null, 1.0),
(4000, "write", null, 2.0);

-- SQLNESS SORT_RESULT 3 1
-- null != null, so it returns the empty set.
tql eval (3, 4, '1s') cache_hit_with_null_label / (cache_miss_with_null_label + cache_hit_with_null_label);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') cache_hit_with_null_label / ignoring(null_label) (cache_miss_with_null_label + ignoring(null_label) cache_hit_with_null_label);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') cache_hit_with_null_label / on(job) (cache_miss_with_null_label + on(job) cache_hit_with_null_label);

drop table cache_hit_with_null_label;

drop table cache_miss_with_null_label;
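The "null != null" comment above is the key point of these cases: when every label, including null_label, takes part in series matching, two series whose null_label values are both NULL do not match and the binary operation yields an empty result, while ignoring(null_label) or on(job) drops that label from the match key. A small standalone sketch of that matching rule (illustrative only; Option models a possibly-NULL label value):

    // A series is identified by its label values; None models a NULL label.
    fn labels_match(
        lhs: &[(&str, Option<&str>)],
        rhs: &[(&str, Option<&str>)],
        on: &[&str],
    ) -> bool {
        on.iter().all(|name| {
            let get = |ls: &[(&str, Option<&str>)]| {
                ls.iter().find(|(n, _)| n == name).map(|(_, v)| *v)
            };
            match (get(lhs).flatten(), get(rhs).flatten()) {
                (Some(a), Some(b)) => a == b, // both present and non-NULL
                _ => false,                   // NULL never equals NULL here
            }
        })
    }

    fn main() {
        let hit = [("job", Some("read")), ("null_label", None)];
        let miss = [("job", Some("read")), ("null_label", None)];

        // Matching on every label fails: null_label is NULL on both sides.
        assert!(!labels_match(&hit, &miss, &["job", "null_label"]));
        // on(job) / ignoring(null_label) drops that label and the series match.
        assert!(labels_match(&hit, &miss, &["job"]));
    }
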
@@ -295,3 +295,40 @@ drop table histogram3_bucket;

Affected Rows: 0

-- test with invalid data (unaligned buckets)
create table histogram4_bucket (
ts timestamp time index,
le string,
s string,
val double,
primary key (s, le),
);

Affected Rows: 0

insert into histogram4_bucket values
(2900000, "0.1", "a", 0),
(2900000, "1", "a", 10),
(2900000, "5", "a", 20),
(2900000, "+Inf", "a", 150),
(3000000, "0.1", "a", 50),
(3000000, "1", "a", 70),
(3000000, "5", "a", 120),
-- INF here is missing
;

Affected Rows: 7

tql eval (2900, 3000, '100s') histogram_quantile(0.9, histogram4_bucket);

+---------------------+---+-----+
| ts | s | val |
+---------------------+---+-----+
| 1970-01-01T00:48:20 | a | 5.0 |
| 1970-01-01T00:50:00 | a | 5.0 |
+---------------------+---+-----+

drop table histogram4_bucket;

Affected Rows: 0

@@ -163,3 +163,27 @@ insert into histogram3_bucket values
tql eval (3000, 3005, '3s') histogram_quantile(0.5, sum by(le, s) (rate(histogram3_bucket[5m])));

drop table histogram3_bucket;

-- test with invalid data (unaligned buckets)
create table histogram4_bucket (
ts timestamp time index,
le string,
s string,
val double,
primary key (s, le),
);

insert into histogram4_bucket values
(2900000, "0.1", "a", 0),
(2900000, "1", "a", 10),
(2900000, "5", "a", 20),
(2900000, "+Inf", "a", 150),
(3000000, "0.1", "a", 50),
(3000000, "1", "a", 70),
(3000000, "5", "a", 120),
-- INF here is missing
;

tql eval (2900, 3000, '100s') histogram_quantile(0.9, histogram4_bucket);

drop table histogram4_bucket;
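For reference, histogram_quantile finds the bucket where the cumulative count crosses q times the total and interpolates inside it; when that crossing lands in the +Inf bucket, the upper bound of the highest finite bucket is returned, which is why the expected value above is 5.0 for q = 0.9. A simplified sketch of that rule (assumptions: cumulative bucket counts, plain linear interpolation, and no handling of the unaligned missing +Inf row that this test also exercises):

    // Buckets are (upper_bound, cumulative_count), sorted by upper bound; +Inf last.
    fn histogram_quantile(q: f64, buckets: &[(f64, f64)]) -> f64 {
        let total = buckets.last().unwrap().1;
        let rank = q * total;
        let mut prev_bound = 0.0;
        let mut prev_count = 0.0;
        for &(bound, count) in buckets {
            if count >= rank {
                if bound.is_infinite() {
                    // Crossing happens in the +Inf bucket: return the highest finite bound.
                    return prev_bound;
                }
                return prev_bound + (bound - prev_bound) * (rank - prev_count) / (count - prev_count);
            }
            prev_bound = bound;
            prev_count = count;
        }
        f64::NAN
    }

    fn main() {
        // The ts=2900000 rows above: le 0.1 -> 0, 1 -> 10, 5 -> 20, +Inf -> 150.
        let buckets = [(0.1, 0.0), (1.0, 10.0), (5.0, 20.0), (f64::INFINITY, 150.0)];
        // q = 0.9 gives rank 135, which falls in the +Inf bucket, so the result is 5.0.
        assert_eq!(histogram_quantile(0.9, &buckets), 5.0);
    }
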
65 tests/cases/standalone/common/promql/subquery.result Normal file
@@ -0,0 +1,65 @@
create table metric_total (
ts timestamp time index,
val double,
);

Affected Rows: 0

insert into metric_total values
(0, 1),
(10000, 2);

Affected Rows: 2

tql eval (10, 10, '1s') sum_over_time(metric_total[50s:10s]);

+---------------------+----------------------------------+
| ts | prom_sum_over_time(ts_range,val) |
+---------------------+----------------------------------+
| 1970-01-01T00:00:10 | 3.0 |
+---------------------+----------------------------------+

tql eval (10, 10, '1s') sum_over_time(metric_total[50s:5s]);

+---------------------+----------------------------------+
| ts | prom_sum_over_time(ts_range,val) |
+---------------------+----------------------------------+
| 1970-01-01T00:00:10 | 4.0 |
+---------------------+----------------------------------+

tql eval (300, 300, '1s') sum_over_time(metric_total[50s:10s]);

+---------------------+----------------------------------+
| ts | prom_sum_over_time(ts_range,val) |
+---------------------+----------------------------------+
| 1970-01-01T00:05:00 | 10.0 |
+---------------------+----------------------------------+

tql eval (359, 359, '1s') sum_over_time(metric_total[60s:10s]);

+---------------------+----------------------------------+
| ts | prom_sum_over_time(ts_range,val) |
+---------------------+----------------------------------+
| 1970-01-01T00:05:59 | 2.0 |
+---------------------+----------------------------------+

tql eval (10, 10, '1s') rate(metric_total[20s:10s]);

+---------------------+----------------------------+
| ts | prom_rate(ts_range,val,ts) |
+---------------------+----------------------------+
| 1970-01-01T00:00:10 | 0.1 |
+---------------------+----------------------------+

tql eval (20, 20, '1s') rate(metric_total[20s:5s]);

+---------------------+----------------------------+
| ts | prom_rate(ts_range,val,ts) |
+---------------------+----------------------------+
| 1970-01-01T00:00:20 | 0.06666666666666667 |
+---------------------+----------------------------+

drop table metric_total;

Affected Rows: 0

22 tests/cases/standalone/common/promql/subquery.sql Normal file
@@ -0,0 +1,22 @@
create table metric_total (
ts timestamp time index,
val double,
);

insert into metric_total values
(0, 1),
(10000, 2);

tql eval (10, 10, '1s') sum_over_time(metric_total[50s:10s]);

tql eval (10, 10, '1s') sum_over_time(metric_total[50s:5s]);

tql eval (300, 300, '1s') sum_over_time(metric_total[50s:10s]);

tql eval (359, 359, '1s') sum_over_time(metric_total[60s:10s]);

tql eval (10, 10, '1s') rate(metric_total[20s:10s]);

tql eval (20, 20, '1s') rate(metric_total[20s:5s]);

drop table metric_total;
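The subquery expectations above follow from how expr[range:resolution] is expanded: the inner expression is evaluated at timestamps aligned to multiples of the resolution inside the range window, each evaluation carrying the most recent sample, and the outer function is applied over those points. A small sketch of that expansion for the [50s:10s] and [50s:5s] cases at t = 10s (illustrative arithmetic only, lookback limits ignored):

    // Samples of metric_total: (timestamp in seconds, value).
    const SAMPLES: &[(i64, f64)] = &[(0, 1.0), (10, 2.0)];

    // Latest sample at or before `ts`.
    fn value_at(ts: i64) -> Option<f64> {
        SAMPLES.iter().rev().find(|(t, _)| *t <= ts).map(|(_, v)| *v)
    }

    // Evaluation points of a subquery [range:step] ending at `end`:
    // step-aligned timestamps within (end - range, end].
    fn subquery_points(end: i64, range: i64, step: i64) -> Vec<i64> {
        (0..)
            .map(|k| k * step)
            .skip_while(|t| *t <= end - range)
            .take_while(|t| *t <= end)
            .collect()
    }

    fn main() {
        // sum_over_time(metric_total[50s:10s]) at t=10: points 0 and 10 -> 1 + 2 = 3.
        let sum: f64 = subquery_points(10, 50, 10).iter().filter_map(|t| value_at(*t)).sum();
        assert_eq!(sum, 3.0);

        // sum_over_time(metric_total[50s:5s]) at t=10: points 0, 5, 10 -> 1 + 1 + 2 = 4.
        let sum: f64 = subquery_points(10, 50, 5).iter().filter_map(|t| value_at(*t)).sum();
        assert_eq!(sum, 4.0);
    }
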
@@ -373,20 +373,20 @@ Affected Rows: 0

show create table test_column_constrain_composite_indexes;

+-----------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Table | Create Table |
+-----------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------+
| test_column_constrain_composite_indexes | CREATE TABLE IF NOT EXISTS "test_column_constrain_composite_indexes" ( |
| | "id" INT NULL SKIPPING INDEX WITH(granularity = '10240', type = 'BLOOM') INVERTED INDEX, |
| | "host" STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'false') SKIPPING INDEX WITH(granularity = '10240', type = 'BLOOM') INVERTED INDEX, |
| | "ts" TIMESTAMP(3) NOT NULL, |
| | TIME INDEX ("ts"), |
| | PRIMARY KEY ("host") |
| | ) |
| | |
| | ENGINE=mito |
| | |
+-----------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------+
+-----------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Table | Create Table |
+-----------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| test_column_constrain_composite_indexes | CREATE TABLE IF NOT EXISTS "test_column_constrain_composite_indexes" ( |
| | "id" INT NULL SKIPPING INDEX WITH(granularity = '10240', type = 'BLOOM') INVERTED INDEX, |
| | "host" STRING NULL FULLTEXT INDEX WITH(analyzer = 'English', case_sensitive = 'false') SKIPPING INDEX WITH(granularity = '10240', type = 'BLOOM') INVERTED INDEX, |
| | "ts" TIMESTAMP(3) NOT NULL, |
| | TIME INDEX ("ts"), |
| | PRIMARY KEY ("host") |
| | ) |
| | |
| | ENGINE=mito |
| | |
+-----------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+

drop table test_column_constrain_composite_indexes;