diff --git a/.github/scripts/pr-review-reminder.js b/.github/scripts/pr-review-reminder.js
index 9b0ce4f490..3853d11e60 100644
--- a/.github/scripts/pr-review-reminder.js
+++ b/.github/scripts/pr-review-reminder.js
@@ -57,14 +57,6 @@
return days;
}
- // Get urgency emoji based on PR age
- function getAgeEmoji(days) {
- if (days >= 14) return "🔴"; // 14+ days - critical
- if (days >= 7) return "🟠"; // 7+ days - urgent
- if (days >= 3) return "🟡"; // 3+ days - needs attention
- return "🟢"; // < 3 days - fresh
- }
-
// Build Slack notification message from PR list
function buildSlackMessage(prs) {
if (prs.length === 0) {
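For context on the removed helper: getAgeEmoji() mapped a PR's age in days to an urgency marker. The hunk only shows the trailing `return days;` of the age computation that sits above it; a hypothetical sketch of that helper (not the repository's exact code) would be:

```js
// Hypothetical sketch of the age helper whose tail (`return days;`) appears
// in the hunk context above; the script's actual implementation may differ.
function getPrAgeDays(createdAt) {
  const ageMs = Date.now() - new Date(createdAt).getTime();
  return Math.floor(ageMs / (1000 * 60 * 60 * 24)); // whole days open
}
```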
diff --git a/.github/workflows/pr-review-reminder.yml b/.github/workflows/pr-review-reminder.yml
index d49e928fec..0ba25fdaca 100644
--- a/.github/workflows/pr-review-reminder.yml
+++ b/.github/workflows/pr-review-reminder.yml
@@ -2,8 +2,8 @@ name: PR Review Reminder
on:
schedule:
- # Run at 9:00 AM UTC+8 (01:00 AM UTC) every day
- - cron: '0 1 * * *'
+ # Run at 9:00 AM UTC+8 (01:00 UTC) on Monday, Wednesday, and Friday
+ - cron: '0 1 * * 1,3,5'
workflow_dispatch:
jobs:
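For readers decoding the new expression: GitHub Actions cron uses the standard five fields (minute, hour, day-of-month, month, day-of-week) and is always evaluated in UTC. An annotated restatement of the schedule above:

```yaml
# '0 1 * * 1,3,5' field by field:
#   minute       0      -> at minute 0
#   hour         1      -> at 01:00 (UTC; GitHub Actions cron has no timezone option)
#   day-of-month *      -> every day of the month
#   month        *      -> every month
#   day-of-week  1,3,5  -> Monday, Wednesday, Friday
schedule:
  - cron: '0 1 * * 1,3,5'
```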
diff --git a/Cargo.lock b/Cargo.lock
index a76ce955e8..da0548704c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -124,12 +124,6 @@ version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
-[[package]]
-name = "android-tzdata"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
-
[[package]]
name = "android_system_properties"
version = "0.1.5"
@@ -262,7 +256,7 @@ dependencies = [
"proc-macro-error2",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -300,9 +294,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "arrow"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c26b57282a08ae92f727497805122fec964c6245cfa0e13f0e75452eaf3bc41f"
+checksum = "6e833808ff2d94ed40d9379848a950d995043c7fb3e81a30b383f4c6033821cc"
dependencies = [
"arrow-arith",
"arrow-array",
@@ -321,9 +315,9 @@ dependencies = [
[[package]]
name = "arrow-arith"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cebf38ca279120ff522f4954b81a39527425b6e9f615e6b72842f4de1ffe02b8"
+checksum = "ad08897b81588f60ba983e3ca39bda2b179bdd84dced378e7df81a5313802ef8"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -335,9 +329,9 @@ dependencies = [
[[package]]
name = "arrow-array"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "744109142cdf8e7b02795e240e20756c2a782ac9180d4992802954a8f871c0de"
+checksum = "8548ca7c070d8db9ce7aa43f37393e4bfcf3f2d3681df278490772fd1673d08d"
dependencies = [
"ahash 0.8.12",
"arrow-buffer",
@@ -346,15 +340,15 @@ dependencies = [
"chrono",
"chrono-tz",
"half",
- "hashbrown 0.15.4",
+ "hashbrown 0.16.0",
"num",
]
[[package]]
name = "arrow-buffer"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "601bb103c4c374bcd1f62c66bcea67b42a2ee91a690486c37d4c180236f11ccc"
+checksum = "e003216336f70446457e280807a73899dd822feaf02087d31febca1363e2fccc"
dependencies = [
"bytes",
"half",
@@ -363,9 +357,9 @@ dependencies = [
[[package]]
name = "arrow-cast"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eed61d9d73eda8df9e3014843def37af3050b5080a9acbe108f045a316d5a0be"
+checksum = "919418a0681298d3a77d1a315f625916cb5678ad0d74b9c60108eb15fd083023"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -384,9 +378,9 @@ dependencies = [
[[package]]
name = "arrow-csv"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa95b96ce0c06b4d33ac958370db8c0d31e88e54f9d6e08b0353d18374d9f991"
+checksum = "bfa9bf02705b5cf762b6f764c65f04ae9082c7cfc4e96e0c33548ee3f67012eb"
dependencies = [
"arrow-array",
"arrow-cast",
@@ -399,9 +393,9 @@ dependencies = [
[[package]]
name = "arrow-data"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43407f2c6ba2367f64d85d4603d6fb9c4b92ed79d2ffd21021b37efa96523e12"
+checksum = "a5c64fff1d142f833d78897a772f2e5b55b36cb3e6320376f0961ab0db7bd6d0"
dependencies = [
"arrow-buffer",
"arrow-schema",
@@ -411,9 +405,9 @@ dependencies = [
[[package]]
name = "arrow-flight"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7c66c5e4a7aedc2bfebffeabc2116d76adb22e08d230b968b995da97f8b11ca"
+checksum = "8c8b0ba0784d56bc6266b79f5de7a24b47024e7b3a0045d2ad4df3d9b686099f"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -430,9 +424,9 @@ dependencies = [
[[package]]
name = "arrow-ipc"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4b0487c4d2ad121cbc42c4db204f1509f8618e589bc77e635e9c40b502e3b90"
+checksum = "1d3594dcddccc7f20fd069bc8e9828ce37220372680ff638c5e00dea427d88f5"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -446,9 +440,9 @@ dependencies = [
[[package]]
name = "arrow-json"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26d747573390905905a2dc4c5a61a96163fe2750457f90a04ee2a88680758c79"
+checksum = "88cf36502b64a127dc659e3b305f1d993a544eab0d48cce704424e62074dc04b"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -457,7 +451,7 @@ dependencies = [
"arrow-schema",
"chrono",
"half",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"lexical-core",
"memchr",
"num",
@@ -468,9 +462,9 @@ dependencies = [
[[package]]
name = "arrow-ord"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c142a147dceb59d057bad82400f1693847c80dca870d008bf7b91caf902810ae"
+checksum = "3c8f82583eb4f8d84d4ee55fd1cb306720cddead7596edce95b50ee418edf66f"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -481,9 +475,9 @@ dependencies = [
[[package]]
name = "arrow-row"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dac6620667fccdab4204689ca173bd84a15de6bb6b756c3a8764d4d7d0c2fc04"
+checksum = "9d07ba24522229d9085031df6b94605e0f4b26e099fb7cdeec37abd941a73753"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -494,9 +488,9 @@ dependencies = [
[[package]]
name = "arrow-schema"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dfa93af9ff2bb80de539e6eb2c1c8764abd0f4b73ffb0d7c82bf1f9868785e66"
+checksum = "b3aa9e59c611ebc291c28582077ef25c97f1975383f1479b12f3b9ffee2ffabe"
dependencies = [
"serde",
"serde_json",
@@ -504,9 +498,9 @@ dependencies = [
[[package]]
name = "arrow-select"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be8b2e0052cd20d36d64f32640b68a5ab54d805d24a473baee5d52017c85536c"
+checksum = "8c41dbbd1e97bfcaee4fcb30e29105fb2c75e4d82ae4de70b792a5d3f66b2e7a"
dependencies = [
"ahash 0.8.12",
"arrow-array",
@@ -518,9 +512,9 @@ dependencies = [
[[package]]
name = "arrow-string"
-version = "56.1.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2155e26e17f053c8975c546fc70cf19c00542f9abf43c23a88a46ef7204204f"
+checksum = "53f5183c150fbc619eede22b861ea7c0eebed8eaac0333eaa7f6da5205fd504d"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -530,7 +524,7 @@ dependencies = [
"memchr",
"num",
"regex",
- "regex-syntax 0.8.5",
+ "regex-syntax 0.8.7",
]
[[package]]
@@ -644,7 +638,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -666,7 +660,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -677,13 +671,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de"
[[package]]
name = "async-trait"
-version = "0.1.88"
+version = "0.1.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5"
+checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -763,7 +757,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -918,7 +912,7 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -1066,7 +1060,7 @@ dependencies = [
"regex",
"rustc-hash 2.1.1",
"shlex",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -1195,7 +1189,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -1218,7 +1212,7 @@ dependencies = [
"proc-macro-crate 3.3.0",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -1270,7 +1264,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4"
dependencies = [
"memchr",
- "regex-automata 0.4.9",
+ "regex-automata 0.4.13",
"serde",
]
@@ -1560,7 +1554,7 @@ version = "0.13.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe45e18904af7af10e4312df7c97251e98af98c70f42f1f2587aecfcbee56bf"
dependencies = [
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"lazy_static",
"num-traits",
"regex",
@@ -1614,17 +1608,16 @@ dependencies = [
[[package]]
name = "chrono"
-version = "0.4.41"
+version = "0.4.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d"
+checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2"
dependencies = [
- "android-tzdata",
"iana-time-zone",
"js-sys",
"num-traits",
"serde",
"wasm-bindgen",
- "windows-link",
+ "windows-link 0.2.1",
]
[[package]]
@@ -1746,7 +1739,7 @@ dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -1984,11 +1977,12 @@ checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
[[package]]
name = "comfy-table"
-version = "7.1.4"
+version = "7.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a65ebfec4fb190b6f90e944a817d60499ee0744e582530e2c9900a22e591d9a"
+checksum = "e0d05af1e006a2407bedef5af410552494ce5be9090444dbbcb57258c1af3d56"
dependencies = [
- "unicode-segmentation",
+ "strum 0.26.3",
+ "strum_macros 0.26.4",
"unicode-width 0.2.1",
]
@@ -2031,7 +2025,6 @@ dependencies = [
"common-base",
"common-error",
"common-macro",
- "common-stat",
"common-telemetry",
"common-test-util",
"common-wal",
@@ -2072,7 +2065,7 @@ dependencies = [
"lazy_static",
"object-store",
"object_store_opendal",
- "orc-rust 0.6.3",
+ "orc-rust",
"parquet",
"paste",
"regex",
@@ -2197,6 +2190,7 @@ dependencies = [
"num-traits",
"paste",
"pretty_assertions",
+ "regex",
"s2",
"serde",
"serde_json",
@@ -2288,7 +2282,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -2462,7 +2456,7 @@ dependencies = [
"futures-util",
"serde",
"snafu 0.8.6",
- "sqlparser 0.55.0-greptime",
+ "sqlparser",
"sqlparser_derive 0.1.1",
"store-api",
"tokio",
@@ -2544,7 +2538,7 @@ dependencies = [
"jsonb",
"serde_json",
"snafu 0.8.6",
- "sqlparser 0.55.0-greptime",
+ "sqlparser",
]
[[package]]
@@ -2552,11 +2546,14 @@ name = "common-stat"
version = "0.18.0"
dependencies = [
"common-base",
+ "common-runtime",
+ "common-telemetry",
"lazy_static",
"nix 0.30.1",
"num_cpus",
"prometheus",
"sysinfo",
+ "tokio",
]
[[package]]
@@ -3177,7 +3174,7 @@ dependencies = [
"proc-macro2",
"quote",
"strsim 0.11.1",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -3191,7 +3188,7 @@ dependencies = [
"proc-macro2",
"quote",
"strsim 0.11.1",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -3213,7 +3210,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
dependencies = [
"darling_core 0.20.11",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -3224,7 +3221,7 @@ checksum = "2b5be8a7a562d315a5b92a630c30cec6bcf663e6673f00fbb69cca66a6f521b9"
dependencies = [
"darling_core 0.21.1",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -3249,8 +3246,8 @@ dependencies = [
[[package]]
name = "datafusion"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"arrow-ipc",
@@ -3277,6 +3274,7 @@ dependencies = [
"datafusion-functions-window",
"datafusion-optimizer",
"datafusion-physical-expr",
+ "datafusion-physical-expr-adapter",
"datafusion-physical-expr-common",
"datafusion-physical-optimizer",
"datafusion-physical-plan",
@@ -3284,7 +3282,6 @@ dependencies = [
"datafusion-sql",
"flate2",
"futures",
- "hex",
"itertools 0.14.0",
"log",
"object_store",
@@ -3292,7 +3289,8 @@ dependencies = [
"parquet",
"rand 0.9.1",
"regex",
- "sqlparser 0.55.0",
+ "rstest",
+ "sqlparser",
"tempfile",
"tokio",
"url",
@@ -3303,8 +3301,8 @@ dependencies = [
[[package]]
name = "datafusion-catalog"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"async-trait",
@@ -3317,7 +3315,6 @@ dependencies = [
"datafusion-physical-expr",
"datafusion-physical-plan",
"datafusion-session",
- "datafusion-sql",
"futures",
"itertools 0.14.0",
"log",
@@ -3328,8 +3325,8 @@ dependencies = [
[[package]]
name = "datafusion-catalog-listing"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"async-trait",
@@ -3350,33 +3347,31 @@ dependencies = [
[[package]]
name = "datafusion-common"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"ahash 0.8.12",
"arrow",
"arrow-ipc",
- "base64 0.22.1",
"chrono",
"half",
"hashbrown 0.14.5",
- "hex",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"libc",
"log",
"object_store",
"parquet",
"paste",
"recursive",
- "sqlparser 0.55.0",
+ "sqlparser",
"tokio",
"web-time",
]
[[package]]
name = "datafusion-common-runtime"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"futures",
"log",
@@ -3385,8 +3380,8 @@ dependencies = [
[[package]]
name = "datafusion-datasource"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"async-compression 0.4.19",
@@ -3399,6 +3394,7 @@ dependencies = [
"datafusion-execution",
"datafusion-expr",
"datafusion-physical-expr",
+ "datafusion-physical-expr-adapter",
"datafusion-physical-expr-common",
"datafusion-physical-plan",
"datafusion-session",
@@ -3408,9 +3404,7 @@ dependencies = [
"itertools 0.14.0",
"log",
"object_store",
- "parquet",
"rand 0.9.1",
- "tempfile",
"tokio",
"tokio-util",
"url",
@@ -3420,19 +3414,17 @@ dependencies = [
[[package]]
name = "datafusion-datasource-csv"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"async-trait",
"bytes",
- "datafusion-catalog",
"datafusion-common",
"datafusion-common-runtime",
"datafusion-datasource",
"datafusion-execution",
"datafusion-expr",
- "datafusion-physical-expr",
"datafusion-physical-expr-common",
"datafusion-physical-plan",
"datafusion-session",
@@ -3444,71 +3436,66 @@ dependencies = [
[[package]]
name = "datafusion-datasource-json"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"async-trait",
"bytes",
- "datafusion-catalog",
"datafusion-common",
"datafusion-common-runtime",
"datafusion-datasource",
"datafusion-execution",
"datafusion-expr",
- "datafusion-physical-expr",
"datafusion-physical-expr-common",
"datafusion-physical-plan",
"datafusion-session",
"futures",
"object_store",
- "serde_json",
"tokio",
]
[[package]]
name = "datafusion-datasource-parquet"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"async-trait",
"bytes",
- "datafusion-catalog",
"datafusion-common",
"datafusion-common-runtime",
"datafusion-datasource",
"datafusion-execution",
"datafusion-expr",
- "datafusion-functions-aggregate",
+ "datafusion-functions-aggregate-common",
"datafusion-physical-expr",
+ "datafusion-physical-expr-adapter",
"datafusion-physical-expr-common",
- "datafusion-physical-optimizer",
"datafusion-physical-plan",
"datafusion-pruning",
"datafusion-session",
"futures",
- "hex",
"itertools 0.14.0",
"log",
"object_store",
"parking_lot 0.12.4",
"parquet",
- "rand 0.9.1",
"tokio",
]
[[package]]
name = "datafusion-doc"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
[[package]]
name = "datafusion-execution"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
+ "async-trait",
"dashmap",
"datafusion-common",
"datafusion-expr",
@@ -3516,7 +3503,6 @@ dependencies = [
"log",
"object_store",
"parking_lot 0.12.4",
- "parquet",
"rand 0.9.1",
"tempfile",
"url",
@@ -3524,8 +3510,8 @@ dependencies = [
[[package]]
name = "datafusion-expr"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"async-trait",
@@ -3536,29 +3522,30 @@ dependencies = [
"datafusion-functions-aggregate-common",
"datafusion-functions-window-common",
"datafusion-physical-expr-common",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
+ "itertools 0.14.0",
"paste",
"recursive",
"serde_json",
- "sqlparser 0.55.0",
+ "sqlparser",
]
[[package]]
name = "datafusion-expr-common"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"datafusion-common",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"itertools 0.14.0",
"paste",
]
[[package]]
name = "datafusion-functions"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"arrow-buffer",
@@ -3585,8 +3572,8 @@ dependencies = [
[[package]]
name = "datafusion-functions-aggregate"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"ahash 0.8.12",
"arrow",
@@ -3605,8 +3592,8 @@ dependencies = [
[[package]]
name = "datafusion-functions-aggregate-common"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"ahash 0.8.12",
"arrow",
@@ -3617,8 +3604,8 @@ dependencies = [
[[package]]
name = "datafusion-functions-nested"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"arrow-ord",
@@ -3626,6 +3613,7 @@ dependencies = [
"datafusion-doc",
"datafusion-execution",
"datafusion-expr",
+ "datafusion-expr-common",
"datafusion-functions",
"datafusion-functions-aggregate",
"datafusion-functions-aggregate-common",
@@ -3638,8 +3626,8 @@ dependencies = [
[[package]]
name = "datafusion-functions-table"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"async-trait",
@@ -3653,8 +3641,8 @@ dependencies = [
[[package]]
name = "datafusion-functions-window"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"datafusion-common",
@@ -3670,8 +3658,8 @@ dependencies = [
[[package]]
name = "datafusion-functions-window-common"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"datafusion-common",
"datafusion-physical-expr-common",
@@ -3679,18 +3667,18 @@ dependencies = [
[[package]]
name = "datafusion-macros"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
- "datafusion-expr",
+ "datafusion-doc",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
name = "datafusion-optimizer"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"chrono",
@@ -3698,18 +3686,19 @@ dependencies = [
"datafusion-expr",
"datafusion-expr-common",
"datafusion-physical-expr",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"itertools 0.14.0",
"log",
"recursive",
"regex",
- "regex-syntax 0.8.5",
+ "regex-syntax 0.8.7",
]
[[package]]
name = "datafusion-orc"
-version = "0.4.1"
-source = "git+https://github.com/GreptimeTeam/datafusion-orc?rev=a0a5f902158f153119316eaeec868cff3fc8a99d#a0a5f902158f153119316eaeec868cff3fc8a99d"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2088adcf23fad3b1430ba95e7782c74e49c9ce5b0965151d96b295d4d538fb17"
dependencies = [
"arrow",
"async-trait",
@@ -3719,14 +3708,15 @@ dependencies = [
"futures",
"futures-util",
"object_store",
- "orc-rust 0.6.0",
+ "orc-rust",
"tokio",
]
[[package]]
name = "datafusion-pg-catalog"
-version = "0.9.0"
-source = "git+https://github.com/datafusion-contrib/datafusion-postgres?rev=3d1b7c7d5b82dd49bafc2803259365e633f654fa#3d1b7c7d5b82dd49bafc2803259365e633f654fa"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f258caedd1593e7dca3bf53912249de6685fa224bcce897ede1fbb7b040ac6f6"
dependencies = [
"async-trait",
"datafusion",
@@ -3738,8 +3728,8 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"ahash 0.8.12",
"arrow",
@@ -3750,17 +3740,31 @@ dependencies = [
"datafusion-physical-expr-common",
"half",
"hashbrown 0.14.5",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"itertools 0.14.0",
- "log",
+ "parking_lot 0.12.4",
"paste",
- "petgraph 0.8.2",
+ "petgraph 0.8.3",
+]
+
+[[package]]
+name = "datafusion-physical-expr-adapter"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+dependencies = [
+ "arrow",
+ "datafusion-common",
+ "datafusion-expr",
+ "datafusion-functions",
+ "datafusion-physical-expr",
+ "datafusion-physical-expr-common",
+ "itertools 0.14.0",
]
[[package]]
name = "datafusion-physical-expr-common"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"ahash 0.8.12",
"arrow",
@@ -3772,8 +3776,8 @@ dependencies = [
[[package]]
name = "datafusion-physical-optimizer"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"datafusion-common",
@@ -3785,14 +3789,13 @@ dependencies = [
"datafusion-physical-plan",
"datafusion-pruning",
"itertools 0.14.0",
- "log",
"recursive",
]
[[package]]
name = "datafusion-physical-plan"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"ahash 0.8.12",
"arrow",
@@ -3811,7 +3814,7 @@ dependencies = [
"futures",
"half",
"hashbrown 0.14.5",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"itertools 0.14.0",
"log",
"parking_lot 0.12.4",
@@ -3821,11 +3824,10 @@ dependencies = [
[[package]]
name = "datafusion-pruning"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
- "arrow-schema",
"datafusion-common",
"datafusion-datasource",
"datafusion-expr-common",
@@ -3838,47 +3840,38 @@ dependencies = [
[[package]]
name = "datafusion-session"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
- "arrow",
"async-trait",
- "dashmap",
"datafusion-common",
- "datafusion-common-runtime",
"datafusion-execution",
"datafusion-expr",
- "datafusion-physical-expr",
"datafusion-physical-plan",
- "datafusion-sql",
- "futures",
- "itertools 0.14.0",
- "log",
- "object_store",
"parking_lot 0.12.4",
- "tokio",
]
[[package]]
name = "datafusion-sql"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"arrow",
"bigdecimal 0.4.8",
+ "chrono",
"datafusion-common",
"datafusion-expr",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"log",
"recursive",
"regex",
- "sqlparser 0.55.0",
+ "sqlparser",
]
[[package]]
name = "datafusion-substrait"
-version = "49.0.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7d5214512740b4dfb742b6b3d91ed9affcc2c9d0#7d5214512740b4dfb742b6b3d91ed9affcc2c9d0"
+version = "50.1.0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
dependencies = [
"async-recursion",
"async-trait",
@@ -3891,6 +3884,7 @@ dependencies = [
"substrait 0.58.0",
"tokio",
"url",
+ "uuid",
]
[[package]]
@@ -3916,6 +3910,7 @@ dependencies = [
"common-query",
"common-recordbatch",
"common-runtime",
+ "common-stat",
"common-telemetry",
"common-test-util",
"common-time",
@@ -3979,7 +3974,7 @@ dependencies = [
"serde",
"serde_json",
"snafu 0.8.6",
- "sqlparser 0.55.0-greptime",
+ "sqlparser",
"sqlparser_derive 0.1.1",
]
@@ -4084,7 +4079,7 @@ checksum = "2cdc8d50f426189eef89dac62fabfa0abb27d5cc008f25bf4156a0203325becc"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4095,7 +4090,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4137,7 +4132,7 @@ dependencies = [
"darling 0.20.11",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4157,7 +4152,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c"
dependencies = [
"derive_builder_core 0.20.2",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4177,7 +4172,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"unicode-xid",
]
@@ -4189,7 +4184,7 @@ checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4248,7 +4243,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4323,7 +4318,7 @@ checksum = "0e197fdfd2cdb5fdeb7f8ddcf3aed5d5d04ecde2890d448b14ffb716f7376b70"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4431,7 +4426,7 @@ dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4443,7 +4438,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4482,7 +4477,7 @@ checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4594,8 +4589,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e24cb5a94bcae1e5408b0effca5cd7172ea3c5755049c5f3af4cd283a165298"
dependencies = [
"bit-set",
- "regex-automata 0.4.9",
- "regex-syntax 0.8.5",
+ "regex-automata 0.4.13",
+ "regex-syntax 0.8.7",
]
[[package]]
@@ -4707,9 +4702,9 @@ dependencies = [
[[package]]
name = "flate2"
-version = "1.1.2"
+version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d"
+checksum = "dc5a4e564e38c699f2880d3fda590bedc2e69f3f84cd48b457bd892ce61d0aa9"
dependencies = [
"crc32fast",
"libz-rs-sys",
@@ -4871,9 +4866,9 @@ checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
[[package]]
name = "form_urlencoded"
-version = "1.2.1"
+version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
+checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf"
dependencies = [
"percent-encoding",
]
@@ -4913,6 +4908,7 @@ dependencies = [
"common-query",
"common-recordbatch",
"common-runtime",
+ "common-stat",
"common-telemetry",
"common-test-util",
"common-time",
@@ -4946,7 +4942,7 @@ dependencies = [
"session",
"snafu 0.8.6",
"sql",
- "sqlparser 0.55.0-greptime",
+ "sqlparser",
"store-api",
"strfmt",
"table",
@@ -4988,7 +4984,7 @@ checksum = "a0b4095fc99e1d858e5b8c7125d2638372ec85aa0fe6c807105cf10b0265ca6c"
dependencies = [
"frunk_proc_macro_helpers",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -5000,7 +4996,7 @@ dependencies = [
"frunk_core",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -5012,7 +5008,7 @@ dependencies = [
"frunk_core",
"frunk_proc_macro_helpers",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -5136,7 +5132,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -5328,7 +5324,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=69a6089933daa573c96808ec4bbc48f447ec6e8c#69a6089933daa573c96808ec4bbc48f447ec6e8c"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=14b9dc40bdc8288742b0cefc7bb024303b7429ef#14b9dc40bdc8288742b0cefc7bb024303b7429ef"
dependencies = [
"prost 0.13.5",
"prost-types 0.13.5",
@@ -5362,7 +5358,7 @@ dependencies = [
"futures-sink",
"futures-util",
"http 0.2.12",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"slab",
"tokio",
"tokio-util",
@@ -5381,7 +5377,7 @@ dependencies = [
"futures-core",
"futures-sink",
"http 1.3.1",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"slab",
"tokio",
"tokio-util",
@@ -5467,6 +5463,12 @@ dependencies = [
"foldhash",
]
+[[package]]
+name = "hashbrown"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d"
+
[[package]]
name = "hashlink"
version = "0.10.0"
@@ -5602,7 +5604,7 @@ checksum = "a56f203cd1c76362b69e3863fd987520ac36cf70a8c92627449b2f64a8cf7d65"
dependencies = [
"cfg-if",
"libc",
- "windows-link",
+ "windows-link 0.1.3",
]
[[package]]
@@ -6021,9 +6023,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "idna"
-version = "1.0.3"
+version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e"
+checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de"
dependencies = [
"idna_adapter",
"smallvec",
@@ -6066,7 +6068,7 @@ dependencies = [
"libflate",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -6117,7 +6119,7 @@ dependencies = [
"rand 0.9.1",
"rand_chacha 0.9.0",
"regex",
- "regex-automata 0.4.9",
+ "regex-automata 0.4.13",
"roaring",
"serde",
"serde_json",
@@ -6144,13 +6146,12 @@ dependencies = [
[[package]]
name = "indexmap"
-version = "2.10.0"
+version = "2.11.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661"
+checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5"
dependencies = [
"equivalent",
- "hashbrown 0.15.4",
- "serde",
+ "hashbrown 0.16.0",
]
[[package]]
@@ -6166,7 +6167,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88"
dependencies = [
"ahash 0.8.12",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"is-terminal",
"itoa",
"log",
@@ -6189,7 +6190,7 @@ dependencies = [
"crossbeam-utils",
"dashmap",
"env_logger",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"itoa",
"log",
"num-format",
@@ -6231,7 +6232,7 @@ checksum = "6c38228f24186d9cc68c729accb4d413be9eaed6ad07ff79e0270d9e56f3de13"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -6576,7 +6577,7 @@ version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ee7893dab2e44ae5f9d0173f26ff4aa327c10b01b06a72b52dd9405b628640d"
dependencies = [
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
]
[[package]]
@@ -6661,7 +6662,7 @@ dependencies = [
"http 1.3.1",
"json-patch",
"k8s-openapi",
- "schemars 0.8.22",
+ "schemars",
"serde",
"serde_json",
"thiserror 1.0.69",
@@ -6677,7 +6678,7 @@ dependencies = [
"proc-macro2",
"quote",
"serde_json",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -6721,7 +6722,7 @@ dependencies = [
"lalrpop-util",
"petgraph 0.7.1",
"regex",
- "regex-syntax 0.8.5",
+ "regex-syntax 0.8.7",
"sha3",
"string_cache",
"term",
@@ -6735,7 +6736,7 @@ version = "0.22.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5baa5e9ff84f1aefd264e6869907646538a52147a755d494517a8007fb48733"
dependencies = [
- "regex-automata 0.4.9",
+ "regex-automata 0.4.13",
"rustversion",
]
@@ -6763,7 +6764,7 @@ dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -6786,7 +6787,7 @@ dependencies = [
"proc-macro2",
"quote",
"regex",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -6876,9 +6877,9 @@ checksum = "775bf80d5878ab7c2b1080b5351a48b2f737d9f6f8b383574eebcc22be0dfccb"
[[package]]
name = "libc"
-version = "0.2.175"
+version = "0.2.177"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
+checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976"
[[package]]
name = "libflate"
@@ -7119,7 +7120,7 @@ dependencies = [
"num-traits",
"quote",
"regex",
- "regex-syntax 0.8.5",
+ "regex-syntax 0.8.7",
"serde",
"vergen",
]
@@ -7134,7 +7135,7 @@ dependencies = [
"cactus",
"cfgrammar",
"filetime",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"lazy_static",
"lrtable",
"num-traits",
@@ -7402,6 +7403,7 @@ dependencies = [
"common-procedure",
"common-procedure-test",
"common-runtime",
+ "common-stat",
"common-telemetry",
"common-time",
"common-version",
@@ -7553,6 +7555,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316"
dependencies = [
"adler2",
+ "simd-adler32",
]
[[package]]
@@ -7694,7 +7697,7 @@ dependencies = [
"cfg-if",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -7793,7 +7796,7 @@ dependencies = [
"proc-macro-error2",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"termcolor",
"thiserror 1.0.69",
]
@@ -7811,7 +7814,7 @@ dependencies = [
"proc-macro-error2",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"termcolor",
"thiserror 2.0.17",
]
@@ -7947,7 +7950,7 @@ checksum = "254a5372af8fc138e36684761d3c0cdb758a4410e938babcff1c860ce14ddbfc"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -8174,7 +8177,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -8287,7 +8290,7 @@ dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -8299,7 +8302,7 @@ dependencies = [
"proc-macro-crate 3.3.0",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -8356,9 +8359,9 @@ dependencies = [
[[package]]
name = "object_store"
-version = "0.12.3"
+version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "efc4f07659e11cd45a341cd24d71e683e3be65d9ff1f8150061678fe60437496"
+checksum = "4c1be0c6c22ec0817cdc77d3842f721a17fd30ab6965001415b5402a74e6b740"
dependencies = [
"async-trait",
"bytes",
@@ -8672,7 +8675,7 @@ dependencies = [
"session",
"snafu 0.8.6",
"sql",
- "sqlparser 0.55.0-greptime",
+ "sqlparser",
"store-api",
"substrait 0.18.0",
"table",
@@ -8682,31 +8685,6 @@ dependencies = [
"tracing",
]
-[[package]]
-name = "orc-rust"
-version = "0.6.0"
-source = "git+https://github.com/GreptimeTeam/orc-rust?rev=d1690a06eec754e97beecf2cf7690267fc818726#d1690a06eec754e97beecf2cf7690267fc818726"
-dependencies = [
- "arrow",
- "async-trait",
- "bytemuck",
- "bytes",
- "chrono",
- "chrono-tz",
- "fallible-streaming-iterator",
- "flate2",
- "futures",
- "futures-util",
- "lz4_flex",
- "lzokay-native",
- "num",
- "prost 0.13.5",
- "snafu 0.8.6",
- "snap",
- "tokio",
- "zstd 0.13.3",
-]
-
[[package]]
name = "orc-rust"
version = "0.6.3"
@@ -8824,7 +8802,7 @@ dependencies = [
"otlp-model",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -8934,9 +8912,9 @@ dependencies = [
[[package]]
name = "parquet"
-version = "56.0.0"
+version = "56.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7288a07ed5d25939a90f9cb1ca5afa6855faa08ec7700613511ae64bdb0620c"
+checksum = "f0dbd48ad52d7dccf8ea1b90a3ddbfaea4f69878dd7683e51c507d4bc52b5b27"
dependencies = [
"ahash 0.8.12",
"arrow-array",
@@ -8953,13 +8931,12 @@ dependencies = [
"flate2",
"futures",
"half",
- "hashbrown 0.15.4",
+ "hashbrown 0.16.0",
"lz4_flex",
"num",
"num-bigint",
"object_store",
"paste",
- "ring",
"seq-macro",
"simdutf8",
"snap",
@@ -9006,7 +8983,7 @@ dependencies = [
"session",
"snafu 0.8.6",
"sql",
- "sqlparser 0.55.0-greptime",
+ "sqlparser",
"store-api",
"table",
]
@@ -9103,9 +9080,9 @@ dependencies = [
[[package]]
name = "percent-encoding"
-version = "2.3.1"
+version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
+checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "permutation"
@@ -9144,7 +9121,7 @@ dependencies = [
"pest_meta",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -9164,7 +9141,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
dependencies = [
"fixedbitset 0.4.2",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
]
[[package]]
@@ -9174,26 +9151,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772"
dependencies = [
"fixedbitset 0.5.7",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
]
[[package]]
name = "petgraph"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "54acf3a685220b533e437e264e4d932cfbdc4cc7ec0cd232ed73c08d03b8a7ca"
+checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455"
dependencies = [
"fixedbitset 0.5.7",
"hashbrown 0.15.4",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"serde",
]
[[package]]
name = "pgwire"
-version = "0.34.1"
+version = "0.34.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c748793f2a9267fa2aa409d9375a5e26e4f1504ea96e34f8cab3e2fc32042d69"
+checksum = "4f56a81b4fcc69016028f657a68f9b8e8a2a4b7d07684ca3298f2d3e7ff199ce"
dependencies = [
"async-trait",
"base64 0.22.1",
@@ -9313,7 +9290,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -9450,7 +9427,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d77244ce2d584cd84f6a15f86195b8c9b2a0dfbfd817c09e0464244091a58ed"
dependencies = [
"base64 0.22.1",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"quick-xml 0.37.5",
"serde",
"time",
@@ -9679,7 +9656,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "061c1221631e079b26479d25bbf2275bfe5917ae8419cd7e34f13bfc2aa7539a"
dependencies = [
"proc-macro2",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -9720,7 +9697,7 @@ dependencies = [
"proc-macro-error-attr2",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -9872,7 +9849,7 @@ dependencies = [
"prost 0.12.6",
"prost-types 0.12.6",
"regex",
- "syn 2.0.104",
+ "syn 2.0.106",
"tempfile",
]
@@ -9892,7 +9869,7 @@ dependencies = [
"prost 0.13.5",
"prost-types 0.13.5",
"regex",
- "syn 2.0.104",
+ "syn 2.0.106",
"tempfile",
]
@@ -9919,7 +9896,7 @@ dependencies = [
"itertools 0.12.1",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -9932,7 +9909,7 @@ dependencies = [
"itertools 0.14.0",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -10160,6 +10137,7 @@ dependencies = [
"num-traits",
"object-store",
"once_cell",
+ "parking_lot 0.12.4",
"partition",
"paste",
"pretty_assertions",
@@ -10174,7 +10152,7 @@ dependencies = [
"session",
"snafu 0.8.6",
"sql",
- "sqlparser 0.55.0-greptime",
+ "sqlparser",
"store-api",
"substrait 0.18.0",
"table",
@@ -10261,9 +10239,9 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.40"
+version = "1.0.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
+checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1"
dependencies = [
"proc-macro2",
]
@@ -10450,7 +10428,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76009fbe0614077fc1a2ce255e3a1881a2e3a3527097d5dc6d8212c585e7e38b"
dependencies = [
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -10488,19 +10466,19 @@ checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
name = "regex"
-version = "1.11.1"
+version = "1.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
+checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4"
dependencies = [
"aho-corasick",
"memchr",
- "regex-automata 0.4.9",
- "regex-syntax 0.8.5",
+ "regex-automata 0.4.13",
+ "regex-syntax 0.8.7",
]
[[package]]
@@ -10514,13 +10492,13 @@ dependencies = [
[[package]]
name = "regex-automata"
-version = "0.4.9"
+version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
+checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c"
dependencies = [
"aho-corasick",
"memchr",
- "regex-syntax 0.8.5",
+ "regex-syntax 0.8.7",
]
[[package]]
@@ -10530,11 +10508,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c11639076bf147be211b90e47790db89f4c22b6c8a9ca6e960833869da67166"
dependencies = [
"aho-corasick",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"itertools 0.13.0",
"nohash",
"regex",
- "regex-syntax 0.8.5",
+ "regex-syntax 0.8.7",
]
[[package]]
@@ -10551,9 +10529,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "regex-syntax"
-version = "0.8.5"
+version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
+checksum = "c3160422bbd54dd5ecfdca71e5fd59b7b8fe2b1697ab2baf64f6d05dcc66d298"
[[package]]
name = "regress"
@@ -10877,7 +10855,7 @@ dependencies = [
"regex",
"relative-path",
"rustc_version",
- "syn 2.0.104",
+ "syn 2.0.106",
"unicode-ident",
]
@@ -10889,7 +10867,7 @@ checksum = "b3a8fb4672e840a587a66fc577a5491375df51ddb88f2a2c2a792598c326fe14"
dependencies = [
"quote",
"rand 0.8.5",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -10912,7 +10890,7 @@ dependencies = [
"proc-macro2",
"quote",
"rust-embed-utils",
- "syn 2.0.104",
+ "syn 2.0.106",
"walkdir",
]
@@ -11224,30 +11202,6 @@ dependencies = [
"serde_json",
]
-[[package]]
-name = "schemars"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f"
-dependencies = [
- "dyn-clone",
- "ref-cast",
- "serde",
- "serde_json",
-]
-
-[[package]]
-name = "schemars"
-version = "1.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1375ba8ef45a6f15d83fa8748f1079428295d403d6ea991d09ab100155fbc06d"
-dependencies = [
- "dyn-clone",
- "ref-cast",
- "serde",
- "serde_json",
-]
-
[[package]]
name = "schemars_derive"
version = "0.8.22"
@@ -11257,7 +11211,7 @@ dependencies = [
"proc-macro2",
"quote",
"serde_derive_internals",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -11303,7 +11257,7 @@ dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"thiserror 2.0.17",
]
@@ -11322,7 +11276,7 @@ dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -11413,7 +11367,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -11424,7 +11378,7 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -11457,7 +11411,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -11478,7 +11432,7 @@ dependencies = [
"proc-macro2",
"quote",
"serde",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -11495,19 +11449,15 @@ dependencies = [
[[package]]
name = "serde_with"
-version = "3.14.0"
+version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5"
+checksum = "21e47d95bc83ed33b2ecf84f4187ad1ab9685d18ff28db000c99deac8ce180e3"
dependencies = [
- "base64 0.22.1",
+ "base64 0.21.7",
"chrono",
"hex",
"indexmap 1.9.3",
- "indexmap 2.10.0",
- "schemars 0.9.0",
- "schemars 1.0.3",
"serde",
- "serde_derive",
"serde_json",
"serde_with_macros",
"time",
@@ -11515,14 +11465,14 @@ dependencies = [
[[package]]
name = "serde_with_macros"
-version = "3.14.0"
+version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f"
+checksum = "ea3cee93715c2e266b9338b7544da68a9f24e227722ba482bd1c024367c77c65"
dependencies = [
"darling 0.20.11",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -11531,7 +11481,7 @@ version = "0.9.34+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
dependencies = [
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"itoa",
"ryu",
"serde",
@@ -11596,7 +11546,7 @@ dependencies = [
"humantime",
"humantime-serde",
"hyper 1.6.0",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"influxdb_line_protocol",
"itertools 0.14.0",
"json5",
@@ -11788,6 +11738,12 @@ dependencies = [
"wide",
]
+[[package]]
+name = "simd-adler32"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"
+
[[package]]
name = "simd-json"
version = "0.15.1"
@@ -11920,7 +11876,7 @@ dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -12015,6 +11971,7 @@ dependencies = [
"datafusion-physical-expr",
"datafusion-sql",
"datatypes",
+ "either",
"hex",
"humantime",
"iso8601",
@@ -12026,7 +11983,7 @@ dependencies = [
"serde",
"serde_json",
"snafu 0.8.6",
- "sqlparser 0.55.0-greptime",
+ "sqlparser",
"sqlparser_derive 0.1.1",
"store-api",
"table",
@@ -12083,26 +12040,14 @@ dependencies = [
[[package]]
name = "sqlparser"
-version = "0.55.0-greptime"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=39e4fc94c3c741981f77e9d63b5ce8c02e0a27ea#39e4fc94c3c741981f77e9d63b5ce8c02e0a27ea"
+version = "0.58.0"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=4b519a5caa95472cc3988f5556813a583dd35af1#4b519a5caa95472cc3988f5556813a583dd35af1"
dependencies = [
"lazy_static",
"log",
"recursive",
"regex",
"serde",
- "sqlparser 0.55.0",
- "sqlparser_derive 0.3.0-greptime",
-]
-
-[[package]]
-name = "sqlparser"
-version = "0.55.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4521174166bac1ff04fe16ef4524c70144cd29682a45978978ca3d7f4e0be11"
-dependencies = [
- "log",
- "recursive",
"sqlparser_derive 0.3.0",
]
@@ -12117,25 +12062,14 @@ dependencies = [
"syn 1.0.109",
]
-[[package]]
-name = "sqlparser_derive"
-version = "0.3.0-greptime"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=39e4fc94c3c741981f77e9d63b5ce8c02e0a27ea#39e4fc94c3c741981f77e9d63b5ce8c02e0a27ea"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.104",
-]
-
[[package]]
name = "sqlparser_derive"
version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da5fc6819faabb412da764b99d3b713bb55083c11e7e0c00144d386cd6a1939c"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=4b519a5caa95472cc3988f5556813a583dd35af1#4b519a5caa95472cc3988f5556813a583dd35af1"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -12170,7 +12104,7 @@ dependencies = [
"futures-util",
"hashbrown 0.15.4",
"hashlink",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"log",
"memchr",
"once_cell",
@@ -12198,7 +12132,7 @@ dependencies = [
"quote",
"sqlx-core",
"sqlx-macros-core",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -12221,7 +12155,7 @@ dependencies = [
"sqlx-mysql",
"sqlx-postgres",
"sqlx-sqlite",
- "syn 2.0.104",
+ "syn 2.0.106",
"tokio",
"url",
]
@@ -12424,7 +12358,7 @@ dependencies = [
"serde",
"serde_json",
"snafu 0.8.6",
- "sqlparser 0.55.0-greptime",
+ "sqlparser",
"strum 0.27.1",
"tokio",
"uuid",
@@ -12495,6 +12429,12 @@ dependencies = [
"strum_macros 0.25.3",
]
+[[package]]
+name = "strum"
+version = "0.26.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06"
+
[[package]]
name = "strum"
version = "0.27.1"
@@ -12514,7 +12454,20 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.104",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "strum_macros"
+version = "0.26.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be"
+dependencies = [
+ "heck 0.5.0",
+ "proc-macro2",
+ "quote",
+ "rustversion",
+ "syn 2.0.106",
]
[[package]]
@@ -12527,7 +12480,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -12573,12 +12526,12 @@ dependencies = [
"prost 0.13.5",
"prost-build 0.13.5",
"prost-types 0.13.5",
- "schemars 0.8.22",
+ "schemars",
"semver",
"serde",
"serde_json",
"serde_yaml",
- "syn 2.0.104",
+ "syn 2.0.106",
"typify 0.1.0",
"walkdir",
]
@@ -12598,12 +12551,12 @@ dependencies = [
"prost-build 0.13.5",
"prost-types 0.13.5",
"regress 0.10.3",
- "schemars 0.8.22",
+ "schemars",
"semver",
"serde",
"serde_json",
"serde_yaml",
- "syn 2.0.104",
+ "syn 2.0.106",
"typify 0.4.2",
"walkdir",
]
@@ -12650,9 +12603,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.104"
+version = "2.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40"
+checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
dependencies = [
"proc-macro2",
"quote",
@@ -12682,7 +12635,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -12744,7 +12697,7 @@ dependencies = [
"serde",
"serde_json",
"snafu 0.8.6",
- "sqlparser 0.55.0-greptime",
+ "sqlparser",
"store-api",
"tokio",
"tokio-util",
@@ -12854,7 +12807,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d60769b80ad7953d8a7b2c70cdfe722bbcdcac6bccc8ac934c40c034d866fc18"
dependencies = [
"byteorder",
- "regex-syntax 0.8.5",
+ "regex-syntax 0.8.7",
"utf8-ranges",
]
@@ -13008,13 +12961,13 @@ dependencies = [
"rand 0.9.1",
"rand_chacha 0.9.0",
"reqwest",
- "schemars 0.8.22",
+ "schemars",
"serde",
"serde_json",
"serde_yaml",
"snafu 0.8.6",
"sql",
- "sqlparser 0.55.0-greptime",
+ "sqlparser",
"sqlx",
"store-api",
"strum 0.27.1",
@@ -13050,6 +13003,7 @@ dependencies = [
"common-query",
"common-recordbatch",
"common-runtime",
+ "common-stat",
"common-telemetry",
"common-test-util",
"common-time",
@@ -13145,7 +13099,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -13156,7 +13110,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -13336,7 +13290,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -13469,7 +13423,7 @@ version = "0.8.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
dependencies = [
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"serde",
"serde_spanned",
"toml_datetime",
@@ -13491,7 +13445,7 @@ version = "0.19.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
dependencies = [
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"toml_datetime",
"winnow 0.5.40",
]
@@ -13502,7 +13456,7 @@ version = "0.22.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
dependencies = [
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"serde",
"serde_spanned",
"toml_datetime",
@@ -13588,7 +13542,7 @@ dependencies = [
"prost-build 0.13.5",
"prost-types 0.13.5",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -13633,7 +13587,7 @@ dependencies = [
"futures-core",
"futures-util",
"hdrhistogram",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"pin-project-lite",
"slab",
"sync_wrapper 1.0.2",
@@ -13738,7 +13692,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -13862,7 +13816,7 @@ checksum = "35f5380909ffc31b4de4f4bdf96b877175a016aa2ca98cee39fcfd8c4d53d952"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -13896,11 +13850,11 @@ dependencies = [
"proc-macro2",
"quote",
"regress 0.9.1",
- "schemars 0.8.22",
+ "schemars",
"semver",
"serde",
"serde_json",
- "syn 2.0.104",
+ "syn 2.0.106",
"thiserror 1.0.69",
"unicode-ident",
]
@@ -13916,11 +13870,11 @@ dependencies = [
"proc-macro2",
"quote",
"regress 0.10.3",
- "schemars 0.8.22",
+ "schemars",
"semver",
"serde",
"serde_json",
- "syn 2.0.104",
+ "syn 2.0.106",
"thiserror 2.0.17",
"unicode-ident",
]
@@ -13933,12 +13887,12 @@ checksum = "f8e6491896e955692d68361c68db2b263e3bec317ec0b684e0e2fa882fb6e31e"
dependencies = [
"proc-macro2",
"quote",
- "schemars 0.8.22",
+ "schemars",
"semver",
"serde",
"serde_json",
"serde_tokenstream",
- "syn 2.0.104",
+ "syn 2.0.106",
"typify-impl 0.1.0",
]
@@ -13950,12 +13904,12 @@ checksum = "7560adf816a1e8dad7c63d8845ef6e31e673e39eab310d225636779230cbedeb"
dependencies = [
"proc-macro2",
"quote",
- "schemars 0.8.22",
+ "schemars",
"semver",
"serde",
"serde_json",
"serde_tokenstream",
- "syn 2.0.104",
+ "syn 2.0.106",
"typify-impl 0.4.2",
]
@@ -14100,13 +14054,14 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "url"
-version = "2.5.4"
+version = "2.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60"
+checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b"
dependencies = [
"form_urlencoded",
"idna",
"percent-encoding",
+ "serde",
]
[[package]]
@@ -14141,9 +14096,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]]
name = "uuid"
-version = "1.17.0"
+version = "1.18.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d"
+checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2"
dependencies = [
"getrandom 0.3.3",
"js-sys",
@@ -14189,7 +14144,7 @@ dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"variadics",
]
@@ -14274,7 +14229,7 @@ dependencies = [
"hostname 0.4.1",
"iana-time-zone",
"idna",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"indoc",
"influxdb-line-protocol",
"itertools 0.14.0",
@@ -14403,7 +14358,7 @@ dependencies = [
"log",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"wasm-bindgen-shared",
]
@@ -14438,7 +14393,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -14584,7 +14539,7 @@ dependencies = [
"windows-collections",
"windows-core 0.61.2",
"windows-future",
- "windows-link",
+ "windows-link 0.1.3",
"windows-numerics",
]
@@ -14617,7 +14572,7 @@ checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3"
dependencies = [
"windows-implement 0.60.0",
"windows-interface 0.59.1",
- "windows-link",
+ "windows-link 0.1.3",
"windows-result 0.3.4",
"windows-strings",
]
@@ -14629,7 +14584,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e"
dependencies = [
"windows-core 0.61.2",
- "windows-link",
+ "windows-link 0.1.3",
"windows-threading",
]
@@ -14641,7 +14596,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -14652,7 +14607,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -14663,7 +14618,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -14674,7 +14629,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -14683,6 +14638,12 @@ version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
+[[package]]
+name = "windows-link"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
+
[[package]]
name = "windows-numerics"
version = "0.2.0"
@@ -14690,7 +14651,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1"
dependencies = [
"windows-core 0.61.2",
- "windows-link",
+ "windows-link 0.1.3",
]
[[package]]
@@ -14708,7 +14669,7 @@ version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6"
dependencies = [
- "windows-link",
+ "windows-link 0.1.3",
]
[[package]]
@@ -14717,7 +14678,7 @@ version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57"
dependencies = [
- "windows-link",
+ "windows-link 0.1.3",
]
[[package]]
@@ -14784,7 +14745,7 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6"
dependencies = [
- "windows-link",
+ "windows-link 0.1.3",
]
[[package]]
@@ -15042,7 +15003,7 @@ checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"synstructure",
]
@@ -15063,7 +15024,7 @@ checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -15083,7 +15044,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"synstructure",
]
@@ -15104,7 +15065,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -15137,7 +15098,7 @@ checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index f500f70b0e..ebafce51ba 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -99,12 +99,12 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
# See for more details: https://github.com/rust-lang/cargo/issues/11329
ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.6"
-arrow = { version = "56.0", features = ["prettyprint"] }
-arrow-array = { version = "56.0", default-features = false, features = ["chrono-tz"] }
-arrow-buffer = "56.0"
-arrow-flight = "56.0"
-arrow-ipc = { version = "56.0", default-features = false, features = ["lz4", "zstd"] }
-arrow-schema = { version = "56.0", features = ["serde"] }
+arrow = { version = "56.2", features = ["prettyprint"] }
+arrow-array = { version = "56.2", default-features = false, features = ["chrono-tz"] }
+arrow-buffer = "56.2"
+arrow-flight = "56.2"
+arrow-ipc = { version = "56.2", default-features = false, features = ["lz4", "zstd"] }
+arrow-schema = { version = "56.2", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
# Remember to update axum-extra, axum-macros when updating axum
@@ -123,18 +123,18 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "6.1"
-datafusion = "49"
-datafusion-common = "49"
-datafusion-expr = "49"
-datafusion-functions = "49"
-datafusion-functions-aggregate-common = "49"
-datafusion-optimizer = "49"
-datafusion-orc = { git = "https://github.com/GreptimeTeam/datafusion-orc", rev = "a0a5f902158f153119316eaeec868cff3fc8a99d" }
-datafusion-pg-catalog = { git = "https://github.com/datafusion-contrib/datafusion-postgres", rev = "3d1b7c7d5b82dd49bafc2803259365e633f654fa" }
-datafusion-physical-expr = "49"
-datafusion-physical-plan = "49"
-datafusion-sql = "49"
-datafusion-substrait = "49"
+datafusion = "50"
+datafusion-common = "50"
+datafusion-expr = "50"
+datafusion-functions = "50"
+datafusion-functions-aggregate-common = "50"
+datafusion-optimizer = "50"
+datafusion-orc = "0.5"
+datafusion-pg-catalog = "0.11"
+datafusion-physical-expr = "50"
+datafusion-physical-plan = "50"
+datafusion-sql = "50"
+datafusion-substrait = "50"
deadpool = "0.12"
deadpool-postgres = "0.14"
derive_builder = "0.20"
@@ -147,7 +147,7 @@ etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62d
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "69a6089933daa573c96808ec4bbc48f447ec6e8c" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "14b9dc40bdc8288742b0cefc7bb024303b7429ef" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -180,7 +180,7 @@ otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "2
"server",
] }
parking_lot = "0.12"
-parquet = { version = "56.0", default-features = false, features = ["arrow", "async", "object_store"] }
+parquet = { version = "56.2", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0"
pin-project = "1.0"
pretty_assertions = "1.4.0"
@@ -191,7 +191,7 @@ prost-types = "0.13"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.9"
ratelimit = "0.10"
-regex = "1.8"
+regex = "1.12"
regex-automata = "0.4"
reqwest = { version = "0.12", default-features = false, features = [
"json",
@@ -217,10 +217,7 @@ simd-json = "0.15"
similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "39e4fc94c3c741981f77e9d63b5ce8c02e0a27ea", features = [
- "visitor",
- "serde",
-] } # branch = "v0.55.x"
+sqlparser = { version = "0.58.0", default-features = false, features = ["std", "visitor", "serde"] }
sqlx = { version = "0.8", features = [
"runtime-tokio-rustls",
"mysql",
@@ -322,16 +319,19 @@ git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"
[patch.crates-io]
-datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-physical-expr-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-datasource = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "4b519a5caa95472cc3988f5556813a583dd35af1" } # branch = "v0.58.x"
[profile.release]
debug = 1
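How the `sqlparser` change above hangs together: the workspace dependency now states a plain crates.io requirement (`0.58.0`), while the new `[patch.crates-io]` entry redirects every `sqlparser` requirement in the graph to the GreptimeTeam fork's `v0.58.x` revision. Because Cargo applies `[patch.crates-io]` workspace-wide, the patched DataFusion crates and GreptimeDB itself resolve to a single fork build, which is what lets Cargo.lock drop the old `sqlparser 0.55.0` / `0.55.0-greptime` duplicate pair seen earlier in this diff.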
diff --git a/config/config.md b/config/config.md
index 46a0aee1a7..72d48b5bcb 100644
--- a/config/config.md
+++ b/config/config.md
@@ -25,12 +25,14 @@
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
+| `http.max_total_body_memory` | String | Unset | Maximum total memory for all concurrent HTTP request bodies.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default.<br/>This allows browsers to access HTTP APIs without CORS restrictions. |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not validate strings. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
+| `grpc.max_total_message_memory` | String | Unset | Maximum total memory for all concurrent gRPC request messages.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
| `grpc.max_connection_age` | String | Unset | The maximum connection age for gRPC connection.<br/>The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.<br/>Refer to https://grpc.io/docs/guides/keepalive/ for more details. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
@@ -235,6 +237,7 @@
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
+| `http.max_total_body_memory` | String | Unset | Maximum total memory for all concurrent HTTP request bodies.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default.<br/>This allows browsers to access HTTP APIs without CORS restrictions. |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not validate strings. |
@@ -242,6 +245,7 @@
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface on the host, with the same port number as the one specified in `grpc.bind_addr`. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
+| `grpc.max_total_message_memory` | String | Unset | Maximum total memory for all concurrent gRPC request messages.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Defaults to `none`. |
| `grpc.max_connection_age` | String | Unset | The maximum connection age for gRPC connection.<br/>The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.<br/>Refer to https://grpc.io/docs/guides/keepalive/ for more details. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
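Both new options describe a process-wide budget shared by all in-flight payloads rather than a per-request cap. The sketch below shows one way such a budget can be enforced; it is a minimal illustration assuming an atomic byte counter, and `BodyMemoryBudget`, `try_acquire`, and `release` are hypothetical names, not GreptimeDB's actual types:

```rust
// Minimal sketch (not GreptimeDB's implementation): a process-wide byte budget
// shared by all in-flight request bodies, where limit_bytes == 0 means unlimited.
use std::sync::atomic::{AtomicU64, Ordering};

pub struct BodyMemoryBudget {
    limit_bytes: u64,     // 0 disables the limit
    in_flight: AtomicU64, // bytes currently reserved by in-flight bodies
}

impl BodyMemoryBudget {
    pub fn new(limit_bytes: u64) -> Self {
        Self {
            limit_bytes,
            in_flight: AtomicU64::new(0),
        }
    }

    /// Try to reserve `n` bytes; returns false when the budget would be exceeded.
    pub fn try_acquire(&self, n: u64) -> bool {
        if self.limit_bytes == 0 {
            return true; // unlimited: never touch the counter
        }
        let mut cur = self.in_flight.load(Ordering::Relaxed);
        loop {
            if cur.saturating_add(n) > self.limit_bytes {
                return false; // caller should reject with 413 / RESOURCE_EXHAUSTED
            }
            match self.in_flight.compare_exchange_weak(
                cur,
                cur + n,
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                Ok(_) => return true,
                Err(actual) => cur = actual,
            }
        }
    }

    /// Release a reservation made by `try_acquire`.
    pub fn release(&self, n: u64) {
        if self.limit_bytes != 0 {
            self.in_flight.fetch_sub(n, Ordering::AcqRel);
        }
    }
}
```

Treating `0` as "no limit" keeps the unlimited default free of atomic traffic, which matches the documented `"0" (unlimited)` behavior.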
diff --git a/config/frontend.example.toml b/config/frontend.example.toml
index b26d88323e..9ffcdad540 100644
--- a/config/frontend.example.toml
+++ b/config/frontend.example.toml
@@ -31,6 +31,10 @@ timeout = "0s"
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
+## Maximum total memory for all concurrent HTTP request bodies.
+## Set to 0 to disable the limit. Default: "0" (unlimited)
+## @toml2docs:none-default
+#+ max_total_body_memory = "1GB"
## HTTP CORS support, it's turned on by default
## This allows browser to access http APIs without CORS restrictions
enable_cors = true
@@ -54,6 +58,10 @@ bind_addr = "127.0.0.1:4001"
server_addr = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8
+## Maximum total memory for all concurrent gRPC request messages.
+## Set to 0 to disable the limit. Default: "0" (unlimited)
+## @toml2docs:none-default
+#+ max_total_message_memory = "1GB"
## Compression mode for frontend side Arrow IPC service. Available options:
## - `none`: disable all compression
## - `transport`: only enable gRPC transport compression (zstd)
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 5fae0f444f..744dbbe751 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -36,6 +36,10 @@ timeout = "0s"
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
+## Maximum total memory for all concurrent HTTP request bodies.
+## Set to 0 to disable the limit. Default: "0" (unlimited)
+## @toml2docs:none-default
+#+ max_total_body_memory = "1GB"
## HTTP CORS support, it's turned on by default
## This allows browser to access http APIs without CORS restrictions
enable_cors = true
@@ -56,6 +60,10 @@ prom_validation_mode = "strict"
bind_addr = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8
+## Maximum total memory for all concurrent gRPC request messages.
+## Set to 0 to disable the limit. Default: "0" (unlimited)
+## @toml2docs:none-default
+#+ max_total_message_memory = "1GB"
## The maximum connection age for gRPC connection.
## The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.
## Refer to https://grpc.io/docs/guides/keepalive/ for more details.
diff --git a/src/api/src/v1/column_def.rs b/src/api/src/v1/column_def.rs
index 5be3d5c196..912b7ee13e 100644
--- a/src/api/src/v1/column_def.rs
+++ b/src/api/src/v1/column_def.rs
@@ -16,8 +16,8 @@ use std::collections::HashMap;
use datatypes::schema::{
COMMENT_KEY, ColumnDefaultConstraint, ColumnSchema, FULLTEXT_KEY, FulltextAnalyzer,
- FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, SkippingIndexOptions,
- SkippingIndexType,
+ FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, JSON_STRUCTURE_SETTINGS_KEY,
+ SKIPPING_INDEX_KEY, SkippingIndexOptions, SkippingIndexType,
};
use greptime_proto::v1::{
Analyzer, FulltextBackend as PbFulltextBackend, SkippingIndexType as PbSkippingIndexType,
@@ -68,6 +69,9 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
}
+ if let Some(settings) = options.options.get(JSON_STRUCTURE_SETTINGS_KEY) {
+ metadata.insert(JSON_STRUCTURE_SETTINGS_KEY.to_string(), settings.clone());
+ }
}
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
@@ -139,6 +142,11 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<ColumnOptions> {
diff --git a/src/catalog/src/system_schema/information_schema/cluster_info.rs b/src/catalog/src/system_schema/information_schema/cluster_info.rs
@@ ... @@ impl InformationSchemaClusterInfoBuilder {
self.active_times.push(Some(
@@ -269,8 +292,10 @@ impl InformationSchemaClusterInfoBuilder {
Arc::new(self.peer_types.finish()),
Arc::new(self.peer_addrs.finish()),
Arc::new(self.peer_hostnames.finish()),
- Arc::new(self.cpus.finish()),
- Arc::new(self.memory_bytes.finish()),
+ Arc::new(self.total_cpu_millicores.finish()),
+ Arc::new(self.total_memory_bytes.finish()),
+ Arc::new(self.cpu_usage_millicores.finish()),
+ Arc::new(self.memory_usage_bytes.finish()),
Arc::new(self.versions.finish()),
Arc::new(self.git_commits.finish()),
Arc::new(self.start_times.finish()),
diff --git a/src/catalog/src/system_schema/pg_catalog.rs b/src/catalog/src/system_schema/pg_catalog.rs
index b3ddec5b3b..08aad2d6dd 100644
--- a/src/catalog/src/system_schema/pg_catalog.rs
+++ b/src/catalog/src/system_schema/pg_catalog.rs
@@ -27,6 +27,7 @@ use datafusion::error::DataFusionError;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion_pg_catalog::pg_catalog::catalog_info::CatalogInfo;
+use datafusion_pg_catalog::pg_catalog::context::EmptyContextProvider;
use datafusion_pg_catalog::pg_catalog::{
PG_CATALOG_TABLES, PgCatalogSchemaProvider, PgCatalogStaticTables, PgCatalogTable,
};
@@ -44,7 +45,7 @@ use crate::system_schema::{
/// [`PGCatalogProvider`] is the provider for a schema named `pg_catalog`, it is not a catalog.
pub struct PGCatalogProvider {
catalog_name: String,
- inner: PgCatalogSchemaProvider,
+ inner: PgCatalogSchemaProvider,
tables: HashMap<String, TableRef>,
table_ids: HashMap<&'static str, u32>,
}
@@ -69,6 +70,7 @@ impl PGCatalogProvider {
catalog_manager,
},
Arc::new(static_tables),
+ EmptyContextProvider,
)
.expect("Failed to initialize PgCatalogSchemaProvider");
diff --git a/src/cmd/src/flownode.rs b/src/cmd/src/flownode.rs
index 500e9bfa89..07f3279724 100644
--- a/src/cmd/src/flownode.rs
+++ b/src/cmd/src/flownode.rs
@@ -30,6 +30,7 @@ use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHand
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::key::TableMetadataManager;
use common_meta::key::flow::FlowMetadataManager;
+use common_stat::ResourceStatImpl;
use common_telemetry::info;
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
use common_version::{short_version, verbose_version};
@@ -372,11 +373,15 @@ impl StartCommand {
Arc::new(InvalidateCacheHandler::new(layered_cache_registry.clone())),
]);
+ let mut resource_stat = ResourceStatImpl::default();
+ resource_stat.start_collect_cpu_usage();
+
let heartbeat_task = flow::heartbeat::HeartbeatTask::new(
&opts,
meta_client.clone(),
opts.heartbeat.clone(),
Arc::new(executor),
+ Arc::new(resource_stat),
);
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(cached_meta_backend.clone()));
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 4c72021a47..fda6d968bf 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -30,6 +30,7 @@ use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
+use common_stat::ResourceStatImpl;
use common_telemetry::info;
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
use common_time::timezone::set_default_timezone;
@@ -421,11 +422,15 @@ impl StartCommand {
Arc::new(InvalidateCacheHandler::new(layered_cache_registry.clone())),
]);
+ let mut resource_stat = ResourceStatImpl::default();
+ resource_stat.start_collect_cpu_usage();
+
let heartbeat_task = HeartbeatTask::new(
&opts,
meta_client.clone(),
opts.heartbeat.clone(),
Arc::new(executor),
+ Arc::new(resource_stat),
);
let heartbeat_task = Some(heartbeat_task);
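The frontend and flownode heartbeats now carry an `Arc<ResourceStatImpl>` so each node can report live usage next to its static capacity (see the `NodeInfo` fields further below). Only `ResourceStatImpl::default()` and `start_collect_cpu_usage()` appear in this diff; the trait below is a hypothetical sketch of the surface such a provider plausibly exposes, with method names taken from the `NodeInfo` field names:

```rust
// Illustrative sketch only: the surface a resource stat provider could expose.
// Method names mirror the NodeInfo fields added in src/common/meta/src/cluster.rs;
// the trait itself is hypothetical, the diff only shows ResourceStatImpl::default()
// and start_collect_cpu_usage().
pub trait ResourceStat: Send + Sync {
    /// Total CPU capacity in millicores (8 cores => 8000).
    fn total_cpu_millicores(&self) -> i64;
    /// Total physical memory in bytes.
    fn total_memory_bytes(&self) -> i64;
    /// Recent CPU usage in millicores, sampled by the background collector
    /// that start_collect_cpu_usage() spawns.
    fn cpu_usage_millicores(&self) -> i64;
    /// Current memory usage in bytes.
    fn memory_usage_bytes(&self) -> i64;
}
```

Millicores follow the Kubernetes convention: 8 physical cores report as 8000, and a process consuming half a core reports 500.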
diff --git a/src/common/config/Cargo.toml b/src/common/config/Cargo.toml
index 1d2b21602f..b45c03a6c3 100644
--- a/src/common/config/Cargo.toml
+++ b/src/common/config/Cargo.toml
@@ -11,7 +11,6 @@ workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
-common-stat.workspace = true
config.workspace = true
humantime-serde.workspace = true
object-store.workspace = true
diff --git a/src/common/config/src/lib.rs b/src/common/config/src/lib.rs
index b806924217..cc25ebce16 100644
--- a/src/common/config/src/lib.rs
+++ b/src/common/config/src/lib.rs
@@ -14,7 +14,6 @@
pub mod config;
pub mod error;
-pub mod utils;
use std::time::Duration;
diff --git a/src/common/config/src/utils.rs b/src/common/config/src/utils.rs
deleted file mode 100644
index 1bc986b77e..0000000000
--- a/src/common/config/src/utils.rs
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use common_base::readable_size::ReadableSize;
-use common_stat::{get_total_cpu_millicores, get_total_memory_readable};
-
-/// `ResourceSpec` holds the static resource specifications of a node,
-/// such as CPU cores and memory capacity. These values are fixed
-/// at startup and do not change dynamically during runtime.
-#[derive(Debug, Clone, Copy)]
-pub struct ResourceSpec {
- pub cpus: i64,
- pub memory: Option,
-}
-
-impl Default for ResourceSpec {
- fn default() -> Self {
- Self {
- cpus: get_total_cpu_millicores(),
- memory: get_total_memory_readable(),
- }
- }
-}
diff --git a/src/common/datasource/Cargo.toml b/src/common/datasource/Cargo.toml
index 303d05ceb1..964f41736c 100644
--- a/src/common/datasource/Cargo.toml
+++ b/src/common/datasource/Cargo.toml
@@ -36,7 +36,7 @@ object_store_opendal.workspace = true
orc-rust = { version = "0.6.3", default-features = false, features = ["async"] }
parquet.workspace = true
paste.workspace = true
-regex = "1.7"
+regex.workspace = true
serde.workspace = true
snafu.workspace = true
strum.workspace = true
diff --git a/src/common/datasource/src/file_format.rs b/src/common/datasource/src/file_format.rs
index b6d4d6c30a..7c4e8d6c88 100644
--- a/src/common/datasource/src/file_format.rs
+++ b/src/common/datasource/src/file_format.rs
@@ -33,7 +33,7 @@ use bytes::{Buf, Bytes};
use datafusion::datasource::physical_plan::FileOpenFuture;
use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::physical_plan::SendableRecordBatchStream;
-use futures::StreamExt;
+use futures::{StreamExt, TryStreamExt};
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::compat::FuturesAsyncWriteCompatExt;
@@ -179,7 +179,7 @@ pub fn open_with_decoder<T: ArrowDecoder, F: Fn() -> DataFusionResult<T>>(
Poll::Ready(decoder.flush().transpose())
});
- Ok(stream.boxed())
+ Ok(stream.map_err(Into::into).boxed())
}))
}
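The only functional change here is `map_err(Into::into)`: the decoder stream yields one error type while the caller now expects another (presumably `DataFusionError` after the DataFusion 50 upgrade), and `TryStreamExt::map_err` converts errors while leaving `Ok` items untouched. A standalone illustration with stand-in error types:

```rust
// Standalone illustration of TryStreamExt::map_err: convert a stream's error
// type through a From impl while leaving Ok items untouched.
use futures::executor::block_on;
use futures::stream::{self, StreamExt, TryStreamExt};

#[derive(Debug)]
struct DecodeError(String);

#[derive(Debug)]
struct PlanError(String);

impl From<DecodeError> for PlanError {
    fn from(e: DecodeError) -> Self {
        PlanError(format!("decode failed: {}", e.0))
    }
}

fn main() {
    block_on(async {
        let raw = stream::iter(vec![Ok::<_, DecodeError>(1), Err(DecodeError("eof".into()))]);
        // Same shape as the change above: map_err(Into::into) then box the stream.
        let mut converted = raw.map_err(PlanError::from).boxed();
        assert!(matches!(converted.next().await, Some(Ok(1))));
        assert!(matches!(converted.next().await, Some(Err(PlanError(_)))));
    });
}
```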
diff --git a/src/common/function/Cargo.toml b/src/common/function/Cargo.toml
index d5b928e2a1..1d272f5d04 100644
--- a/src/common/function/Cargo.toml
+++ b/src/common/function/Cargo.toml
@@ -51,6 +51,7 @@ nalgebra.workspace = true
num = "0.4"
num-traits = "0.2"
paste.workspace = true
+regex.workspace = true
s2 = { version = "0.0.12", optional = true }
serde.workspace = true
serde_json.workspace = true
diff --git a/src/common/function/src/aggrs/aggr_wrapper.rs b/src/common/function/src/aggrs/aggr_wrapper.rs
index 4ee8190f2d..ed691296ee 100644
--- a/src/common/function/src/aggrs/aggr_wrapper.rs
+++ b/src/common/function/src/aggrs/aggr_wrapper.rs
@@ -22,6 +22,7 @@
//! `foo_merge`'s input arg is the same as `foo_state`'s output, and its output is the same as `foo`'s input.
//!
+use std::hash::{Hash, Hasher};
use std::sync::Arc;
use arrow::array::StructArray;
@@ -272,7 +273,7 @@ impl StateMergeHelper {
}
/// Wrapper to make an aggregate function out of a state function.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct StateWrapper {
inner: AggregateUDF,
name: String,
@@ -616,6 +617,20 @@ impl AggregateUDFImpl for MergeWrapper {
}
}
+impl PartialEq for MergeWrapper {
+ fn eq(&self, other: &Self) -> bool {
+ self.inner == other.inner
+ }
+}
+
+impl Eq for MergeWrapper {}
+
+impl Hash for MergeWrapper {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.inner.hash(state);
+ }
+}
+
/// The merge accumulator, which modifies `update_batch`'s behavior to accept one struct array that
/// includes the state fields of the original aggregate function, and merges said states into the
/// original accumulator; the output is the same as the original aggregate function's.
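This diff adds the same `PartialEq`/`Eq`/`Hash` trio to several function types (`MergeWrapper` here, `ScalarUdf` and the `admin_fn` macro output below), presumably because the upgraded DataFusion compares and hashes UDFs at plan level. The sketch below is a generic illustration of the delegate-to-inner pattern, with `Inner`/`Wrapper` as stand-in names; the one invariant to keep is that `hash` only touches fields that `eq` compares:

```rust
// Generic sketch of the delegate-to-inner pattern; Inner/Wrapper are stand-ins.
use std::hash::{Hash, Hasher};

#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct Inner {
    name: String,
}

#[derive(Debug, Clone)]
struct Wrapper {
    inner: Inner,
    // Derived state deliberately excluded from eq/hash.
    cached_display: String,
}

impl PartialEq for Wrapper {
    fn eq(&self, other: &Self) -> bool {
        self.inner == other.inner
    }
}

impl Eq for Wrapper {}

impl Hash for Wrapper {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Invariant: hash exactly what eq compares, so a == b
        // implies hash(a) == hash(b).
        self.inner.hash(state);
    }
}
```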
diff --git a/src/common/function/src/aggrs/aggr_wrapper/tests.rs b/src/common/function/src/aggrs/aggr_wrapper/tests.rs
index d24cdd8475..97a5a792d9 100644
--- a/src/common/function/src/aggrs/aggr_wrapper/tests.rs
+++ b/src/common/function/src/aggrs/aggr_wrapper/tests.rs
@@ -39,8 +39,7 @@ use datafusion::prelude::SessionContext;
use datafusion_common::arrow::array::AsArray;
use datafusion_common::arrow::datatypes::{Float64Type, UInt64Type};
use datafusion_common::{Column, TableReference};
-use datafusion_expr::expr::AggregateFunction;
-use datafusion_expr::sqlparser::ast::NullTreatment;
+use datafusion_expr::expr::{AggregateFunction, NullTreatment};
use datafusion_expr::{
Aggregate, ColumnarValue, Expr, LogicalPlan, ScalarFunctionArgs, SortExpr, TableScan, lit,
};
diff --git a/src/common/function/src/aggrs/count_hash.rs b/src/common/function/src/aggrs/count_hash.rs
index ded88107e6..7cc594f2e3 100644
--- a/src/common/function/src/aggrs/count_hash.rs
+++ b/src/common/function/src/aggrs/count_hash.rs
@@ -68,7 +68,7 @@ impl CountHash {
}
}
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct CountHash {
signature: Signature,
}
diff --git a/src/common/function/src/function_registry.rs b/src/common/function/src/function_registry.rs
index 75bb71c63a..e51dcf4cb8 100644
--- a/src/common/function/src/function_registry.rs
+++ b/src/common/function/src/function_registry.rs
@@ -34,6 +34,7 @@ use crate::scalars::json::JsonFunction;
use crate::scalars::matches::MatchesFunction;
use crate::scalars::matches_term::MatchesTermFunction;
use crate::scalars::math::MathFunction;
+use crate::scalars::string::register_string_functions;
use crate::scalars::timestamp::TimestampFunction;
use crate::scalars::uddsketch_calc::UddSketchCalcFunction;
use crate::scalars::vector::VectorFunction as VectorScalarFunction;
@@ -154,6 +155,9 @@ pub static FUNCTION_REGISTRY: LazyLock<Arc<FunctionRegistry>> = LazyLock::new(||
// Json related functions
JsonFunction::register(&function_registry);
+ // String related functions
+ register_string_functions(&function_registry);
+
// Vector related functions
VectorScalarFunction::register(&function_registry);
VectorAggrFunction::register(&function_registry);
diff --git a/src/common/function/src/scalars.rs b/src/common/function/src/scalars.rs
index 6f93f2741d..9a8c9cc3a0 100644
--- a/src/common/function/src/scalars.rs
+++ b/src/common/function/src/scalars.rs
@@ -20,6 +20,7 @@ pub mod json;
pub mod matches;
pub mod matches_term;
pub mod math;
+pub(crate) mod string;
pub mod vector;
pub(crate) mod hll_count;
diff --git a/src/common/function/src/scalars/date/date_format.rs b/src/common/function/src/scalars/date/date_format.rs
index 0e321c957e..dfa5a444ca 100644
--- a/src/common/function/src/scalars/date/date_format.rs
+++ b/src/common/function/src/scalars/date/date_format.rs
@@ -20,7 +20,9 @@ use common_query::error;
use common_time::{Date, Timestamp};
use datafusion_common::DataFusionError;
use datafusion_common::arrow::array::{Array, AsArray, StringViewBuilder};
-use datafusion_common::arrow::datatypes::{ArrowTimestampType, DataType, Date32Type, TimeUnit};
+use datafusion_common::arrow::datatypes::{
+ ArrowTimestampType, DataType, Date32Type, Date64Type, TimeUnit,
+};
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature};
use snafu::ResultExt;
@@ -40,6 +42,7 @@ impl Default for DateFormatFunction {
signature: helper::one_of_sigs2(
vec![
DataType::Date32,
+ DataType::Date64,
DataType::Timestamp(TimeUnit::Second, None),
DataType::Timestamp(TimeUnit::Millisecond, None),
DataType::Timestamp(TimeUnit::Microsecond, None),
@@ -115,6 +118,29 @@ impl Function for DateFormatFunction {
builder.append_option(result.as_deref());
}
}
+ DataType::Date64 => {
+ let left = left.as_primitive::<Date64Type>();
+ for i in 0..size {
+ let date = left.is_valid(i).then(|| {
+ let ms = left.value(i);
+ Timestamp::new_millisecond(ms)
+ });
+ let format = formats.is_valid(i).then(|| formats.value(i));
+
+ let result = match (date, format) {
+ (Some(ts), Some(fmt)) => {
+ Some(ts.as_formatted_string(fmt, Some(timezone)).map_err(|e| {
+ DataFusionError::Execution(format!(
+ "cannot format {ts:?} as '{fmt}': {e}"
+ ))
+ })?)
+ }
+ _ => None,
+ };
+
+ builder.append_option(result.as_deref());
+ }
+ }
x => {
return Err(DataFusionError::Execution(format!(
"unsupported input data type {x}"
@@ -137,7 +163,9 @@ mod tests {
use std::sync::Arc;
use arrow_schema::Field;
- use datafusion_common::arrow::array::{Date32Array, StringArray, TimestampSecondArray};
+ use datafusion_common::arrow::array::{
+ Date32Array, Date64Array, StringArray, TimestampSecondArray,
+ };
use datafusion_common::config::ConfigOptions;
use datafusion_expr::{TypeSignature, Volatility};
@@ -166,7 +194,7 @@ mod tests {
Signature {
type_signature: TypeSignature::OneOf(sigs),
volatility: Volatility::Immutable
- } if sigs.len() == 5));
+ } if sigs.len() == 6));
}
#[test]
@@ -213,6 +241,50 @@ mod tests {
}
}
+ #[test]
+ fn test_date64_date_format() {
+ let f = DateFormatFunction::default();
+
+ let dates = vec![Some(123000), None, Some(42000), None];
+ let formats = vec![
+ "%Y-%m-%d %T.%3f",
+ "%Y-%m-%d %T.%3f",
+ "%Y-%m-%d %T.%3f",
+ "%Y-%m-%d %T.%3f",
+ ];
+ let results = [
+ Some("1970-01-01 00:02:03.000"),
+ None,
+ Some("1970-01-01 00:00:42.000"),
+ None,
+ ];
+
+ let mut config_options = ConfigOptions::default();
+ config_options.extensions.insert(FunctionContext::default());
+ let config_options = Arc::new(config_options);
+
+ let args = ScalarFunctionArgs {
+ args: vec![
+ ColumnarValue::Array(Arc::new(Date64Array::from(dates))),
+ ColumnarValue::Array(Arc::new(StringArray::from_iter_values(formats))),
+ ],
+ arg_fields: vec![],
+ number_rows: 4,
+ return_field: Arc::new(Field::new("x", DataType::Utf8View, false)),
+ config_options,
+ };
+ let result = f
+ .invoke_with_args(args)
+ .and_then(|x| x.to_array(4))
+ .unwrap();
+ let vector = result.as_string_view();
+
+ assert_eq!(4, vector.len());
+ for (actual, expect) in vector.iter().zip(results) {
+ assert_eq!(actual, expect);
+ }
+ }
+
#[test]
fn test_date_date_format() {
let f = DateFormatFunction::default();
diff --git a/src/common/function/src/scalars/geo/geohash.rs b/src/common/function/src/scalars/geo/geohash.rs
index 2a9deddca6..90bb958246 100644
--- a/src/common/function/src/scalars/geo/geohash.rs
+++ b/src/common/function/src/scalars/geo/geohash.rs
@@ -76,7 +76,7 @@ impl Function for GeohashFunction {
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
- Ok(DataType::Utf8)
+ Ok(DataType::Utf8View)
}
fn signature(&self) -> &Signature {
@@ -176,7 +176,7 @@ impl Function for GeohashNeighboursFunction {
Ok(DataType::List(Arc::new(Field::new(
"item",
DataType::Utf8View,
- false,
+ true,
))))
}
diff --git a/src/common/function/src/scalars/geo/h3.rs b/src/common/function/src/scalars/geo/h3.rs
index d90eed8143..c6630525df 100644
--- a/src/common/function/src/scalars/geo/h3.rs
+++ b/src/common/function/src/scalars/geo/h3.rs
@@ -355,9 +355,9 @@ impl Function for H3CellCenterLatLng {
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::List(Arc::new(Field::new(
- "x",
+ "item",
DataType::Float64,
- false,
+ true,
))))
}
diff --git a/src/common/function/src/scalars/string.rs b/src/common/function/src/scalars/string.rs
new file mode 100644
index 0000000000..95c6201ee2
--- /dev/null
+++ b/src/common/function/src/scalars/string.rs
@@ -0,0 +1,26 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! String scalar functions
+
+mod regexp_extract;
+
+pub(crate) use regexp_extract::RegexpExtractFunction;
+
+use crate::function_registry::FunctionRegistry;
+
+/// Register all string functions
+pub fn register_string_functions(registry: &FunctionRegistry) {
+ RegexpExtractFunction::register(registry);
+}
diff --git a/src/common/function/src/scalars/string/regexp_extract.rs b/src/common/function/src/scalars/string/regexp_extract.rs
new file mode 100644
index 0000000000..bc78c4df74
--- /dev/null
+++ b/src/common/function/src/scalars/string/regexp_extract.rs
@@ -0,0 +1,339 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implementation of REGEXP_EXTRACT function
+use std::fmt;
+use std::sync::Arc;
+
+use datafusion_common::DataFusionError;
+use datafusion_common::arrow::array::{Array, AsArray, LargeStringBuilder};
+use datafusion_common::arrow::compute::cast;
+use datafusion_common::arrow::datatypes::DataType;
+use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, TypeSignature, Volatility};
+use regex::{Regex, RegexBuilder};
+
+use crate::function::Function;
+use crate::function_registry::FunctionRegistry;
+
+const NAME: &str = "regexp_extract";
+
+// Safety limits
+const MAX_REGEX_SIZE: usize = 1024 * 1024; // compiled regex heap cap
+const MAX_DFA_SIZE: usize = 2 * 1024 * 1024; // lazy DFA cap
+const MAX_TOTAL_RESULT_SIZE: usize = 64 * 1024 * 1024; // total batch cap
+const MAX_SINGLE_MATCH: usize = 1024 * 1024; // per-row cap
+const MAX_PATTERN_LEN: usize = 10_000; // pattern text length cap
+
+/// REGEXP_EXTRACT function implementation
+/// Extracts the first substring matching the given regular expression pattern.
+/// If no match is found, returns NULL.
+///
+#[derive(Debug)]
+pub struct RegexpExtractFunction {
+ signature: Signature,
+}
+
+impl RegexpExtractFunction {
+ pub fn register(registry: &FunctionRegistry) {
+ registry.register_scalar(RegexpExtractFunction::default());
+ }
+}
+
+impl Default for RegexpExtractFunction {
+ fn default() -> Self {
+ Self {
+ signature: Signature::one_of(
+ vec![
+ TypeSignature::Exact(vec![DataType::Utf8View, DataType::Utf8]),
+ TypeSignature::Exact(vec![DataType::Utf8View, DataType::Utf8View]),
+ TypeSignature::Exact(vec![DataType::Utf8, DataType::Utf8View]),
+ TypeSignature::Exact(vec![DataType::LargeUtf8, DataType::Utf8View]),
+ TypeSignature::Exact(vec![DataType::Utf8View, DataType::LargeUtf8]),
+ TypeSignature::Exact(vec![DataType::Utf8, DataType::Utf8]),
+ TypeSignature::Exact(vec![DataType::LargeUtf8, DataType::Utf8]),
+ TypeSignature::Exact(vec![DataType::Utf8, DataType::LargeUtf8]),
+ TypeSignature::Exact(vec![DataType::LargeUtf8, DataType::LargeUtf8]),
+ ],
+ Volatility::Immutable,
+ ),
+ }
+ }
+}
+
+impl fmt::Display for RegexpExtractFunction {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}", NAME.to_ascii_uppercase())
+ }
+}
+
+impl Function for RegexpExtractFunction {
+ fn name(&self) -> &str {
+ NAME
+ }
+
+ // Always return LargeUtf8 for simplicity and safety
+ fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
+ Ok(DataType::LargeUtf8)
+ }
+
+ fn signature(&self) -> &Signature {
+ &self.signature
+ }
+
+ fn invoke_with_args(
+ &self,
+ args: ScalarFunctionArgs,
+ ) -> datafusion_common::Result<ColumnarValue> {
+ if args.args.len() != 2 {
+ return Err(DataFusionError::Execution(
+ "REGEXP_EXTRACT requires exactly two arguments (text, pattern)".to_string(),
+ ));
+ }
+
+ // Keep original ColumnarValue variants for scalar-pattern fast path
+ let pattern_is_scalar = matches!(args.args[1], ColumnarValue::Scalar(_));
+
+ let arrays = ColumnarValue::values_to_arrays(&args.args)?;
+ let text_array = &arrays[0];
+ let pattern_array = &arrays[1];
+
+ // Cast both to LargeUtf8 for uniform access (supports Utf8/Utf8View/Dictionary)
+ let text_large = cast(text_array.as_ref(), &DataType::LargeUtf8).map_err(|e| {
+ DataFusionError::Execution(format!("REGEXP_EXTRACT: text cast failed: {e}"))
+ })?;
+ let pattern_large = cast(pattern_array.as_ref(), &DataType::LargeUtf8).map_err(|e| {
+ DataFusionError::Execution(format!("REGEXP_EXTRACT: pattern cast failed: {e}"))
+ })?;
+
+ let text = text_large.as_string::<i64>();
+ let pattern = pattern_large.as_string::<i64>();
+ let len = text.len();
+
+ // Pre-size result builder with conservative estimate
+ let mut estimated_total = 0usize;
+ for i in 0..len {
+ if !text.is_null(i) {
+ estimated_total = estimated_total.saturating_add(text.value_length(i) as usize);
+ if estimated_total > MAX_TOTAL_RESULT_SIZE {
+ return Err(DataFusionError::ResourcesExhausted(format!(
+ "REGEXP_EXTRACT total output exceeds {} bytes",
+ MAX_TOTAL_RESULT_SIZE
+ )));
+ }
+ }
+ }
+ let mut builder = LargeStringBuilder::with_capacity(len, estimated_total);
+
+ // Fast path: if pattern is scalar, compile once
+ let compiled_scalar: Option<Regex> = if pattern_is_scalar && len > 0 && !pattern.is_null(0)
+ {
+ Some(compile_regex_checked(pattern.value(0))?)
+ } else {
+ None
+ };
+
+ for i in 0..len {
+ if text.is_null(i) || pattern.is_null(i) {
+ builder.append_null();
+ continue;
+ }
+
+ let s = text.value(i);
+ let pat = pattern.value(i);
+
+ // Compile or reuse regex
+ let re = if let Some(ref compiled) = compiled_scalar {
+ compiled
+ } else {
+ // TODO: For performance-critical applications with repeating patterns,
+ // consider adding a small LRU cache here
+ &compile_regex_checked(pat)?
+ };
+
+ // First match only
+ if let Some(m) = re.find(s) {
+ let m_str = m.as_str();
+ if m_str.len() > MAX_SINGLE_MATCH {
+ return Err(DataFusionError::Execution(
+ "REGEXP_EXTRACT match exceeds per-row limit (1MB)".to_string(),
+ ));
+ }
+ builder.append_value(m_str);
+ } else {
+ builder.append_null();
+ }
+ }
+
+ Ok(ColumnarValue::Array(Arc::new(builder.finish())))
+ }
+}
+
+// Compile a regex with safety checks
+fn compile_regex_checked(pattern: &str) -> datafusion_common::Result<Regex> {
+ if pattern.len() > MAX_PATTERN_LEN {
+ return Err(DataFusionError::Execution(format!(
+ "REGEXP_EXTRACT pattern too long (> {} chars)",
+ MAX_PATTERN_LEN
+ )));
+ }
+ RegexBuilder::new(pattern)
+ .size_limit(MAX_REGEX_SIZE)
+ .dfa_size_limit(MAX_DFA_SIZE)
+ .build()
+ .map_err(|e| {
+ DataFusionError::Execution(format!("REGEXP_EXTRACT invalid pattern '{}': {e}", pattern))
+ })
+}
+
+#[cfg(test)]
+mod tests {
+ use datafusion_common::arrow::array::StringArray;
+ use datafusion_common::arrow::datatypes::Field;
+ use datafusion_expr::ScalarFunctionArgs;
+
+ use super::*;
+
+ #[test]
+ fn test_regexp_extract_function_basic() {
+ let text_array = Arc::new(StringArray::from(vec!["version 1.2.3", "no match here"]));
+ let pattern_array = Arc::new(StringArray::from(vec!["\\d+\\.\\d+\\.\\d+", "\\d+"]));
+
+ let args = ScalarFunctionArgs {
+ args: vec![
+ ColumnarValue::Array(text_array),
+ ColumnarValue::Array(pattern_array),
+ ],
+ arg_fields: vec![
+ Arc::new(Field::new("arg_0", DataType::Utf8, false)),
+ Arc::new(Field::new("arg_1", DataType::Utf8, false)),
+ ],
+ return_field: Arc::new(Field::new("result", DataType::LargeUtf8, true)),
+ number_rows: 2,
+ config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
+ };
+
+ let function = RegexpExtractFunction::default();
+ let result = function.invoke_with_args(args).unwrap();
+
+ if let ColumnarValue::Array(array) = result {
+ let string_array = array.as_string::<i64>();
+ assert_eq!(string_array.value(0), "1.2.3");
+ assert!(string_array.is_null(1)); // no match should return NULL
+ } else {
+ panic!("Expected array result");
+ }
+ }
+
+ #[test]
+ fn test_regexp_extract_phone_number() {
+ let text_array = Arc::new(StringArray::from(vec!["Phone: 123-456-7890", "No phone"]));
+ let pattern_array = Arc::new(StringArray::from(vec![
+ "\\d{3}-\\d{3}-\\d{4}",
+ "\\d{3}-\\d{3}-\\d{4}",
+ ]));
+
+ let args = ScalarFunctionArgs {
+ args: vec![
+ ColumnarValue::Array(text_array),
+ ColumnarValue::Array(pattern_array),
+ ],
+ arg_fields: vec![
+ Arc::new(Field::new("arg_0", DataType::Utf8, false)),
+ Arc::new(Field::new("arg_1", DataType::Utf8, false)),
+ ],
+ return_field: Arc::new(Field::new("result", DataType::LargeUtf8, true)),
+ number_rows: 2,
+ config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
+ };
+
+ let function = RegexpExtractFunction::default();
+ let result = function.invoke_with_args(args).unwrap();
+
+ if let ColumnarValue::Array(array) = result {
+ let string_array = array.as_string::<i64>();
+ assert_eq!(string_array.value(0), "123-456-7890");
+ assert!(string_array.is_null(1)); // no match should return NULL
+ } else {
+ panic!("Expected array result");
+ }
+ }
+
+ #[test]
+ fn test_regexp_extract_email() {
+ let text_array = Arc::new(StringArray::from(vec![
+ "Email: user@domain.com",
+ "Invalid email",
+ ]));
+ let pattern_array = Arc::new(StringArray::from(vec![
+ "[a-zA-Z0-9]+@[a-zA-Z0-9]+\\.[a-zA-Z]+",
+ "[a-zA-Z0-9]+@[a-zA-Z0-9]+\\.[a-zA-Z]+",
+ ]));
+
+ let args = ScalarFunctionArgs {
+ args: vec![
+ ColumnarValue::Array(text_array),
+ ColumnarValue::Array(pattern_array),
+ ],
+ arg_fields: vec![
+ Arc::new(Field::new("arg_0", DataType::Utf8, false)),
+ Arc::new(Field::new("arg_1", DataType::Utf8, false)),
+ ],
+ return_field: Arc::new(Field::new("result", DataType::LargeUtf8, true)),
+ number_rows: 2,
+ config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
+ };
+
+ let function = RegexpExtractFunction::default();
+ let result = function.invoke_with_args(args).unwrap();
+
+ if let ColumnarValue::Array(array) = result {
+ let string_array = array.as_string::<i64>();
+ assert_eq!(string_array.value(0), "user@domain.com");
+ assert!(string_array.is_null(1)); // no match should return NULL
+ } else {
+ panic!("Expected array result");
+ }
+ }
+
+ #[test]
+ fn test_regexp_extract_with_nulls() {
+ let text_array = Arc::new(StringArray::from(vec![Some("test 123"), None]));
+ let pattern_array = Arc::new(StringArray::from(vec![Some("\\d+"), Some("\\d+")]));
+
+ let args = ScalarFunctionArgs {
+ args: vec![
+ ColumnarValue::Array(text_array),
+ ColumnarValue::Array(pattern_array),
+ ],
+ arg_fields: vec![
+ Arc::new(Field::new("arg_0", DataType::Utf8, true)),
+ Arc::new(Field::new("arg_1", DataType::Utf8, false)),
+ ],
+ return_field: Arc::new(Field::new("result", DataType::LargeUtf8, true)),
+ number_rows: 2,
+ config_options: Arc::new(datafusion_common::config::ConfigOptions::default()),
+ };
+
+ let function = RegexpExtractFunction::default();
+ let result = function.invoke_with_args(args).unwrap();
+
+ if let ColumnarValue::Array(array) = result {
+ let string_array = array.as_string::<i64>();
+ assert_eq!(string_array.value(0), "123");
+ assert!(string_array.is_null(1)); // NULL input should return NULL
+ } else {
+ panic!("Expected array result");
+ }
+ }
+}
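The safety caps in `compile_regex_checked` lean on knobs the `regex` crate itself provides: `RegexBuilder::size_limit` bounds the compiled program's heap use and `dfa_size_limit` bounds the lazy DFA's cache, so a hostile pattern fails at compile time instead of ballooning at match time. A standalone demonstration of the two knobs plus the first-match semantics (from SQL this would presumably surface as something like `SELECT regexp_extract(message, '\d+\.\d+\.\d+')`, with NULL for non-matching rows):

```rust
// Standalone demo of the regex-crate knobs used above: size_limit caps the
// compiled program, dfa_size_limit caps the lazy DFA's cache.
use regex::RegexBuilder;

fn main() {
    let re = RegexBuilder::new(r"\d+\.\d+\.\d+")
        .size_limit(1024 * 1024)         // reject patterns that compile above 1 MiB
        .dfa_size_limit(2 * 1024 * 1024) // cap the lazy DFA cache at 2 MiB
        .build()
        .expect("pattern fits within the limits");

    // First match only, mirroring regexp_extract's semantics (NULL when absent).
    assert_eq!(re.find("version 1.2.3").map(|m| m.as_str()), Some("1.2.3"));
    assert!(re.find("no digits here").is_none());
}
```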
diff --git a/src/common/function/src/scalars/udf.rs b/src/common/function/src/scalars/udf.rs
index 503a66d331..eee3ede801 100644
--- a/src/common/function/src/scalars/udf.rs
+++ b/src/common/function/src/scalars/udf.rs
@@ -14,6 +14,7 @@
use std::any::Any;
use std::fmt::{Debug, Formatter};
+use std::hash::{Hash, Hasher};
use datafusion::arrow::datatypes::DataType;
use datafusion::logical_expr::{ScalarFunctionArgs, ScalarUDFImpl};
@@ -33,6 +34,20 @@ impl Debug for ScalarUdf {
}
}
+impl PartialEq for ScalarUdf {
+ fn eq(&self, other: &Self) -> bool {
+ self.function.signature() == other.function.signature()
+ }
+}
+
+impl Eq for ScalarUdf {}
+
+impl Hash for ScalarUdf {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.function.signature().hash(state)
+ }
+}
+
impl ScalarUDFImpl for ScalarUdf {
fn as_any(&self) -> &dyn Any {
self
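
The signature-based `PartialEq`/`Eq`/`Hash` impls above (and the matching impls the `admin_fn` macro emits later in this diff) exist so that these wrapper UDFs can live in hash-based collections, which recent DataFusion versions expect of `ScalarUDFImpl` types that hold non-hashable function objects. A minimal standalone sketch of the pattern, with a hypothetical `Sig` struct standing in for DataFusion's `Signature`:

```rust
use std::collections::HashSet;
use std::hash::{Hash, Hasher};

// Hypothetical stand-in for DataFusion's `Signature`.
#[derive(PartialEq, Eq, Hash, Clone)]
struct Sig {
    name: &'static str,
    arg_count: usize,
}

// Like `ScalarUdf`, this wrapper holds a non-hashable function object,
// so equality and hashing are delegated to the signature alone.
struct Udf {
    sig: Sig,
    func: Box<dyn Fn(i64) -> i64>,
}

impl PartialEq for Udf {
    fn eq(&self, other: &Self) -> bool {
        self.sig == other.sig
    }
}

impl Eq for Udf {}

impl Hash for Udf {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.sig.hash(state)
    }
}

fn main() {
    let a = Udf {
        sig: Sig { name: "abs", arg_count: 1 },
        func: Box::new(i64::abs),
    };
    let b = Udf {
        sig: Sig { name: "abs", arg_count: 1 },
        func: Box::new(|x| x.abs()),
    };
    let mut set = HashSet::new();
    assert!(set.insert(a));
    // Same signature, different body: treated as equal and deduplicated.
    assert!(!set.insert(b));
    assert_eq!(set.len(), 1);
}
```

The trade-off is worth keeping in mind: two functions with identical signatures but different behavior compare equal under this scheme.
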
diff --git a/src/common/function/src/system/pg_catalog.rs b/src/common/function/src/system/pg_catalog.rs
index c768aae248..b66e208ea9 100644
--- a/src/common/function/src/system/pg_catalog.rs
+++ b/src/common/function/src/system/pg_catalog.rs
@@ -32,10 +32,36 @@ use crate::system::define_nullary_udf;
const CURRENT_SCHEMA_FUNCTION_NAME: &str = "current_schema";
const CURRENT_SCHEMAS_FUNCTION_NAME: &str = "current_schemas";
const SESSION_USER_FUNCTION_NAME: &str = "session_user";
+const CURRENT_DATABASE_FUNCTION_NAME: &str = "current_database";
define_nullary_udf!(CurrentSchemaFunction);
define_nullary_udf!(CurrentSchemasFunction);
define_nullary_udf!(SessionUserFunction);
+define_nullary_udf!(CurrentDatabaseFunction);
+
+impl Function for CurrentDatabaseFunction {
+ fn name(&self) -> &str {
+ CURRENT_DATABASE_FUNCTION_NAME
+ }
+
+ fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
+ Ok(DataType::Utf8View)
+ }
+
+ fn signature(&self) -> &Signature {
+ &self.signature
+ }
+
+ fn invoke_with_args(
+ &self,
+ args: ScalarFunctionArgs,
+ ) -> datafusion_common::Result<ColumnarValue> {
+ let func_ctx = find_function_context(&args)?;
+ let db = func_ctx.query_ctx.current_catalog().to_string();
+
+ Ok(ColumnarValue::Scalar(ScalarValue::Utf8View(Some(db))))
+ }
+}
// Though "current_schema" can be aliased to "database", to not cause any breaking changes,
// we are not doing it: not until https://github.com/apache/datafusion/issues/17469 is resolved.
@@ -141,6 +167,7 @@ impl PGCatalogFunction {
registry.register_scalar(CurrentSchemaFunction::default());
registry.register_scalar(CurrentSchemasFunction::default());
registry.register_scalar(SessionUserFunction::default());
+ registry.register_scalar(CurrentDatabaseFunction::default());
registry.register(pg_catalog::format_type::create_format_type_udf());
registry.register(pg_catalog::create_pg_get_partkeydef_udf());
registry.register(pg_catalog::has_privilege_udf::create_has_privilege_udf(
diff --git a/src/common/macro/src/admin_fn.rs b/src/common/macro/src/admin_fn.rs
index ca97e5468f..651c083ec8 100644
--- a/src/common/macro/src/admin_fn.rs
+++ b/src/common/macro/src/admin_fn.rs
@@ -345,6 +345,20 @@ fn build_struct(
Ok(datafusion_expr::ColumnarValue::Array(result_vector.to_arrow_array()))
}
}
+
+ impl PartialEq for #name {
+ fn eq(&self, other: &Self) -> bool {
+ self.signature == other.signature
+ }
+ }
+
+ impl Eq for #name {}
+
+ impl std::hash::Hash for #name {
+ fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+ self.signature.hash(state)
+ }
+ }
}
.into()
}
diff --git a/src/common/meta/src/cluster.rs b/src/common/meta/src/cluster.rs
index 63001970b6..74485513e9 100644
--- a/src/common/meta/src/cluster.rs
+++ b/src/common/meta/src/cluster.rs
@@ -120,10 +120,16 @@ pub struct NodeInfo {
pub start_time_ms: u64,
// The node build cpus
#[serde(default)]
- pub cpus: u32,
+ pub total_cpu_millicores: i64,
// The node build memory bytes
#[serde(default)]
- pub memory_bytes: u64,
+ pub total_memory_bytes: i64,
+ // The node's CPU usage in millicores
+ #[serde(default)]
+ pub cpu_usage_millicores: i64,
+ // The node's memory usage in bytes
+ #[serde(default)]
+ pub memory_usage_bytes: i64,
// The node build hostname
#[serde(default)]
pub hostname: String,
@@ -333,8 +339,10 @@ mod tests {
version: "".to_string(),
git_commit: "".to_string(),
start_time_ms: 1,
- cpus: 0,
- memory_bytes: 0,
+ total_cpu_millicores: 0,
+ total_memory_bytes: 0,
+ cpu_usage_millicores: 0,
+ memory_usage_bytes: 0,
hostname: "test_hostname".to_string(),
};
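
Since every new `NodeInfo` field is annotated with `#[serde(default)]`, heartbeats from older nodes that predate the usage fields still deserialize, with the missing values falling back to zero. A small sketch of that behavior, assuming a trimmed-down struct with the same annotations:

```rust
use serde::{Deserialize, Serialize};

// Trimmed-down stand-in for `NodeInfo`, keeping only two fields for illustration.
#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct NodeInfo {
    #[serde(default)]
    total_cpu_millicores: i64,
    // New field: absent from payloads sent by older nodes.
    #[serde(default)]
    cpu_usage_millicores: i64,
}

fn main() {
    // A payload from a node that predates `cpu_usage_millicores`.
    let old_payload = r#"{"total_cpu_millicores": 4000}"#;
    let info: NodeInfo = serde_json::from_str(old_payload).unwrap();
    // The missing field falls back to `i64::default()`, i.e. 0.
    assert_eq!(info.total_cpu_millicores, 4000);
    assert_eq!(info.cpu_usage_millicores, 0);
}
```
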
diff --git a/src/common/meta/src/instruction.rs b/src/common/meta/src/instruction.rs
index 9a9d955f58..c7bd82d675 100644
--- a/src/common/meta/src/instruction.rs
+++ b/src/common/meta/src/instruction.rs
@@ -55,6 +55,10 @@ impl Display for RegionIdent {
/// The result of downgrade leader region.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
pub struct DowngradeRegionReply {
+ /// The [RegionId].
+ /// For backward compatibility, it defaults to [RegionId::new(0, 0)].
+ #[serde(default)]
+ pub region_id: RegionId,
/// Returns the `last_entry_id` if available.
pub last_entry_id: Option<u64>,
/// Returns the `metadata_last_entry_id` if available (Only available for metric engine).
@@ -423,14 +427,60 @@ pub enum Instruction {
CloseRegions(Vec<RegionIdent>),
/// Upgrades a region.
UpgradeRegion(UpgradeRegion),
+ #[serde(
+ deserialize_with = "single_or_multiple_from",
+ alias = "DowngradeRegion"
+ )]
/// Downgrades a region.
- DowngradeRegion(DowngradeRegion),
+ DowngradeRegions(Vec<DowngradeRegion>),
/// Invalidates batch cache.
InvalidateCaches(Vec<CacheIdent>),
/// Flushes regions.
FlushRegions(FlushRegions),
}
+impl Instruction {
+ /// Converts the instruction into a vector of [OpenRegion].
+ pub fn into_open_regions(self) -> Option<Vec<OpenRegion>> {
+ match self {
+ Self::OpenRegions(open_regions) => Some(open_regions),
+ _ => None,
+ }
+ }
+
+ /// Converts the instruction into a vector of [RegionIdent].
+ pub fn into_close_regions(self) -> Option<Vec<RegionIdent>> {
+ match self {
+ Self::CloseRegions(close_regions) => Some(close_regions),
+ _ => None,
+ }
+ }
+
+ /// Converts the instruction into a [FlushRegions].
+ pub fn into_flush_regions(self) -> Option<FlushRegions> {
+ match self {
+ Self::FlushRegions(flush_regions) => Some(flush_regions),
+ _ => None,
+ }
+ }
+
+ /// Converts the instruction into a vector of [DowngradeRegion].
+ pub fn into_downgrade_regions(self) -> Option<Vec<DowngradeRegion>> {
+ match self {
+ Self::DowngradeRegions(downgrade_region) => Some(downgrade_region),
+ _ => None,
+ }
+ }
+
+ /// Converts the instruction into an [UpgradeRegion].
+ pub fn into_upgrade_regions(self) -> Option<UpgradeRegion> {
+ match self {
+ Self::UpgradeRegion(upgrade_region) => Some(upgrade_region),
+ _ => None,
+ }
+ }
+}
+
/// The reply of [UpgradeRegion].
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
pub struct UpgradeRegionReply {
@@ -452,6 +502,39 @@ impl Display for UpgradeRegionReply {
}
}
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
+pub struct DowngradeRegionsReply {
+ pub replies: Vec<DowngradeRegionReply>,
+}
+
+impl DowngradeRegionsReply {
+ pub fn new(replies: Vec<DowngradeRegionReply>) -> Self {
+ Self { replies }
+ }
+
+ pub fn single(reply: DowngradeRegionReply) -> Self {
+ Self::new(vec![reply])
+ }
+}
+
+#[derive(Deserialize)]
+#[serde(untagged)]
+enum DowngradeRegionsCompat {
+ Single(DowngradeRegionReply),
+ Multiple(DowngradeRegionsReply),
+}
+
+fn downgrade_regions_compat_from<'de, D>(deserializer: D) -> Result<DowngradeRegionsReply, D::Error>
+where
+ D: Deserializer<'de>,
+{
+ let helper = DowngradeRegionsCompat::deserialize(deserializer)?;
+ Ok(match helper {
+ DowngradeRegionsCompat::Single(x) => DowngradeRegionsReply::new(vec![x]),
+ DowngradeRegionsCompat::Multiple(reply) => reply,
+ })
+}
+
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InstructionReply {
@@ -460,7 +543,11 @@ pub enum InstructionReply {
#[serde(alias = "close_region")]
CloseRegions(SimpleReply),
UpgradeRegion(UpgradeRegionReply),
- DowngradeRegion(DowngradeRegionReply),
+ #[serde(
+ alias = "downgrade_region",
+ deserialize_with = "downgrade_regions_compat_from"
+ )]
+ DowngradeRegions(DowngradeRegionsReply),
FlushRegions(FlushRegionReply),
}
@@ -470,8 +557,8 @@ impl Display for InstructionReply {
Self::OpenRegions(reply) => write!(f, "InstructionReply::OpenRegions({})", reply),
Self::CloseRegions(reply) => write!(f, "InstructionReply::CloseRegions({})", reply),
Self::UpgradeRegion(reply) => write!(f, "InstructionReply::UpgradeRegion({})", reply),
- Self::DowngradeRegion(reply) => {
- write!(f, "InstructionReply::DowngradeRegion({})", reply)
+ Self::DowngradeRegions(reply) => {
+ write!(f, "InstructionReply::DowngradeRegions({:?})", reply)
}
Self::FlushRegions(reply) => write!(f, "InstructionReply::FlushRegions({})", reply),
}
@@ -493,6 +580,27 @@ impl InstructionReply {
_ => panic!("Expected OpenRegions reply"),
}
}
+
+ pub fn expect_upgrade_region_reply(self) -> UpgradeRegionReply {
+ match self {
+ Self::UpgradeRegion(reply) => reply,
+ _ => panic!("Expected UpgradeRegion reply"),
+ }
+ }
+
+ pub fn expect_downgrade_regions_reply(self) -> Vec<DowngradeRegionReply> {
+ match self {
+ Self::DowngradeRegions(reply) => reply.replies,
+ _ => panic!("Expected DowngradeRegion reply"),
+ }
+ }
+
+ pub fn expect_flush_regions_reply(self) -> FlushRegionReply {
+ match self {
+ Self::FlushRegions(reply) => reply,
+ _ => panic!("Expected FlushRegions reply"),
+ }
+ }
}
#[cfg(test)]
@@ -532,11 +640,27 @@ mod tests {
r#"{"CloseRegions":[{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}]}"#,
serialized
);
+
+ let downgrade_region = InstructionReply::DowngradeRegions(DowngradeRegionsReply::single(
+ DowngradeRegionReply {
+ region_id: RegionId::new(1024, 1),
+ last_entry_id: None,
+ metadata_last_entry_id: None,
+ exists: true,
+ error: None,
+ },
+ ));
+
+ let serialized = serde_json::to_string(&downgrade_region).unwrap();
+ assert_eq!(
+ r#"{"type":"downgrade_regions","replies":[{"region_id":4398046511105,"last_entry_id":null,"metadata_last_entry_id":null,"exists":true,"error":null}]}"#,
+ serialized
+ )
}
#[test]
fn test_deserialize_instruction() {
- let open_region_instruction = r#"{"OpenRegion":[{"region_ident":{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","region_options":{},"region_wal_options":{},"skip_wal_replay":false}]}"#;
+ let open_region_instruction = r#"{"OpenRegion":{"region_ident":{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","region_options":{},"region_wal_options":{},"skip_wal_replay":false}}"#;
let open_region_instruction: Instruction =
serde_json::from_str(open_region_instruction).unwrap();
let open_region = Instruction::OpenRegions(vec![OpenRegion::new(
@@ -553,7 +677,7 @@ mod tests {
)]);
assert_eq!(open_region_instruction, open_region);
- let close_region_instruction = r#"{"CloseRegion":[{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}]}"#;
+ let close_region_instruction = r#"{"CloseRegion":{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}}"#;
let close_region_instruction: Instruction =
serde_json::from_str(close_region_instruction).unwrap();
let close_region = Instruction::CloseRegions(vec![RegionIdent {
@@ -564,6 +688,15 @@ mod tests {
}]);
assert_eq!(close_region_instruction, close_region);
+ let downgrade_region_instruction = r#"{"DowngradeRegions":{"region_id":4398046511105,"flush_timeout":{"secs":1,"nanos":0}}}"#;
+ let downgrade_region_instruction: Instruction =
+ serde_json::from_str(downgrade_region_instruction).unwrap();
+ let downgrade_region = Instruction::DowngradeRegions(vec![DowngradeRegion {
+ region_id: RegionId::new(1024, 1),
+ flush_timeout: Some(Duration::from_millis(1000)),
+ }]);
+ assert_eq!(downgrade_region_instruction, downgrade_region);
+
let close_region_instruction_reply =
r#"{"result":true,"error":null,"type":"close_region"}"#;
let close_region_instruction_reply: InstructionReply =
@@ -582,6 +715,20 @@ mod tests {
error: None,
});
assert_eq!(open_region_instruction_reply, open_region_reply);
+
+ let downgrade_region_instruction_reply = r#"{"region_id":4398046511105,"last_entry_id":null,"metadata_last_entry_id":null,"exists":true,"error":null,"type":"downgrade_region"}"#;
+ let downgrade_region_instruction_reply: InstructionReply =
+ serde_json::from_str(downgrade_region_instruction_reply).unwrap();
+ let downgrade_region_reply = InstructionReply::DowngradeRegions(
+ DowngradeRegionsReply::single(DowngradeRegionReply {
+ region_id: RegionId::new(1024, 1),
+ last_entry_id: None,
+ metadata_last_entry_id: None,
+ exists: true,
+ error: None,
+ }),
+ );
+ assert_eq!(downgrade_region_instruction_reply, downgrade_region_reply);
}
#[derive(Debug, Clone, Serialize, Deserialize)]
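
The `DowngradeRegionsCompat` helper above (and, presumably, the referenced `single_or_multiple_from` on the instruction side) is an instance of a general serde pattern: probe with an `#[serde(untagged)]` enum so that both the legacy single-object shape and the new list shape deserialize into the list form. A self-contained sketch of the same idea with toy types:

```rust
use serde::{Deserialize, Deserializer};

#[derive(Debug, Deserialize, PartialEq)]
struct Reply {
    exists: bool,
}

#[derive(Debug, PartialEq)]
struct Replies(Vec<Reply>);

// Untagged: serde tries each variant in order until one matches the input shape.
#[derive(Deserialize)]
#[serde(untagged)]
enum Compat {
    Single(Reply),
    Multiple(Vec<Reply>),
}

impl<'de> Deserialize<'de> for Replies {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        Ok(match Compat::deserialize(deserializer)? {
            Compat::Single(reply) => Replies(vec![reply]),
            Compat::Multiple(replies) => Replies(replies),
        })
    }
}

fn main() {
    // The legacy single-object payload and the new list payload both parse,
    // and they normalize to the same value.
    let old: Replies = serde_json::from_str(r#"{"exists": true}"#).unwrap();
    let new: Replies = serde_json::from_str(r#"[{"exists": true}]"#).unwrap();
    assert_eq!(old, new);
}
```
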
diff --git a/src/common/stat/Cargo.toml b/src/common/stat/Cargo.toml
index 3d0198f6a2..d0e8b5448f 100644
--- a/src/common/stat/Cargo.toml
+++ b/src/common/stat/Cargo.toml
@@ -6,11 +6,14 @@ license.workspace = true
[dependencies]
common-base.workspace = true
+common-runtime.workspace = true
+common-telemetry.workspace = true
lazy_static.workspace = true
nix.workspace = true
num_cpus.workspace = true
prometheus.workspace = true
sysinfo.workspace = true
+tokio.workspace = true
[lints]
workspace = true
diff --git a/src/common/stat/src/cgroups.rs b/src/common/stat/src/cgroups.rs
index fe26f5ec36..ce8f5ac87a 100644
--- a/src/common/stat/src/cgroups.rs
+++ b/src/common/stat/src/cgroups.rs
@@ -117,7 +117,10 @@ pub fn get_cpu_limit_from_cgroups() -> Option<i64> {
None
}
-fn get_cpu_usage() -> Option<i64> {
+/// Get the cumulative CPU usage (`usage_usec`) of the cgroup in microseconds.
+///
+/// - Return `None` if it's not in the cgroups v2 environment or it fails to read the cpu usage.
+pub fn get_cpu_usage_from_cgroups() -> Option<i64> {
// In certain bare-metal environments, the `/sys/fs/cgroup/cpu.stat` file may be present and reflect system-wide CPU usage rather than container-specific metrics.
// To ensure accurate collection of container-level CPU usage, verify the existence of the `/sys/fs/cgroup/memory.current` file.
// The presence of this file typically indicates execution within a containerized environment, thereby validating the relevance of the collected CPU usage data.
@@ -142,6 +145,22 @@ fn get_cpu_usage() -> Option<i64> {
fields[1].trim().parse::<i64>().ok()
}
+// Calculate the CPU usage in millicores from two cumulative `usage_usec` readings.
+//
+// - Return `0` if the cpu usage has not increased or the interval is not positive.
+pub(crate) fn calculate_cpu_usage(
+ current_cpu_usage_usecs: i64,
+ last_cpu_usage_usecs: i64,
+ interval_milliseconds: i64,
+) -> i64 {
+ let diff = current_cpu_usage_usecs - last_cpu_usage_usecs;
+ if diff > 0 && interval_milliseconds > 0 {
+ ((diff as f64 / interval_milliseconds as f64).round() as i64).max(1)
+ } else {
+ 0
+ }
+}
+
// Check whether the cgroup is v2.
// - Return `true` if the cgroup is v2, otherwise return `false`.
// - Return `None` if the detection fails or not on linux.
@@ -230,7 +249,7 @@ impl Collector for CgroupsMetricsCollector {
}
fn collect(&self) -> Vec<MetricFamily> {
- if let Some(cpu_usage) = get_cpu_usage() {
+ if let Some(cpu_usage) = get_cpu_usage_from_cgroups() {
self.cpu_usage.set(cpu_usage);
}
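
The unit conversion in `calculate_cpu_usage` works because a `usage_usec` delta divided by a wall-clock interval in milliseconds yields millicores directly: one fully busy core accrues 1,000 µs of CPU time per 1 ms of wall clock. A quick check of that arithmetic (the formula below reimplements `calculate_cpu_usage` for illustration):

```rust
// Mirrors `calculate_cpu_usage` from cgroups.rs, restated here to check the units.
fn millicores(current_usecs: i64, last_usecs: i64, interval_ms: i64) -> i64 {
    let diff = current_usecs - last_usecs;
    if diff > 0 && interval_ms > 0 {
        ((diff as f64 / interval_ms as f64).round() as i64).max(1)
    } else {
        0
    }
}

fn main() {
    // One full core over 5 s: 5,000,000 µs / 5,000 ms = 1000 millicores.
    assert_eq!(millicores(5_000_000, 0, 5_000), 1000);
    // Half a core over 5 s: 2,500,000 µs / 5,000 ms = 500 millicores.
    assert_eq!(millicores(7_500_000, 5_000_000, 5_000), 500);
    // Any positive usage is clamped up to at least 1 millicore.
    assert_eq!(millicores(10, 0, 5_000), 1);
    // No progress (or a non-positive interval) reports 0.
    assert_eq!(millicores(100, 100, 5_000), 0);
}
```
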
diff --git a/src/common/stat/src/lib.rs b/src/common/stat/src/lib.rs
index 2c6cbea3f1..544b9439c8 100644
--- a/src/common/stat/src/lib.rs
+++ b/src/common/stat/src/lib.rs
@@ -13,66 +13,7 @@
// limitations under the License.
mod cgroups;
+mod resource;
pub use cgroups::*;
-use common_base::readable_size::ReadableSize;
-use sysinfo::System;
-
-/// Get the total CPU in millicores.
-pub fn get_total_cpu_millicores() -> i64 {
- // Get CPU limit from cgroups filesystem.
- if let Some(cgroup_cpu_limit) = get_cpu_limit_from_cgroups() {
- cgroup_cpu_limit
- } else {
- // Get total CPU cores from host system.
- num_cpus::get() as i64 * 1000
- }
-}
-
-/// Get the total memory in bytes.
-pub fn get_total_memory_bytes() -> i64 {
- // Get memory limit from cgroups filesystem.
- if let Some(cgroup_memory_limit) = get_memory_limit_from_cgroups() {
- cgroup_memory_limit
- } else {
- // Get total memory from host system.
- if sysinfo::IS_SUPPORTED_SYSTEM {
- let mut sys_info = System::new();
- sys_info.refresh_memory();
- sys_info.total_memory() as i64
- } else {
- // If the system is not supported, return -1.
- -1
- }
- }
-}
-
-/// Get the total CPU cores. The result will be rounded to the nearest integer.
-/// For example, if the total CPU is 1.5 cores(1500 millicores), the result will be 2.
-pub fn get_total_cpu_cores() -> usize {
- ((get_total_cpu_millicores() as f64) / 1000.0).round() as usize
-}
-
-/// Get the total memory in readable size.
-pub fn get_total_memory_readable() -> Option<ReadableSize> {
- if get_total_memory_bytes() > 0 {
- Some(ReadableSize(get_total_memory_bytes() as u64))
- } else {
- None
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[test]
- fn test_get_total_cpu_cores() {
- assert!(get_total_cpu_cores() > 0);
- }
-
- #[test]
- fn test_get_total_memory_readable() {
- assert!(get_total_memory_readable().unwrap() > ReadableSize::mb(0));
- }
-}
+pub use resource::*;
diff --git a/src/common/stat/src/resource.rs b/src/common/stat/src/resource.rs
new file mode 100644
index 0000000000..babfa54a19
--- /dev/null
+++ b/src/common/stat/src/resource.rs
@@ -0,0 +1,187 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+use std::sync::atomic::{AtomicI64, Ordering};
+use std::time::Duration;
+
+use common_base::readable_size::ReadableSize;
+use common_runtime::JoinHandle;
+use common_telemetry::info;
+use sysinfo::System;
+use tokio::time::sleep;
+
+use crate::cgroups::calculate_cpu_usage;
+use crate::{
+ get_cpu_limit_from_cgroups, get_cpu_usage_from_cgroups, get_memory_limit_from_cgroups,
+ get_memory_usage_from_cgroups,
+};
+
+/// Get the total CPU in millicores. If no CPU limit is set, it falls back to the host's CPU count converted to millicores.
+pub fn get_total_cpu_millicores() -> i64 {
+ // Get CPU limit from cgroups filesystem.
+ if let Some(cgroup_cpu_limit) = get_cpu_limit_from_cgroups() {
+ cgroup_cpu_limit
+ } else {
+ // Get total CPU cores from host system.
+ num_cpus::get() as i64 * 1000
+ }
+}
+
+/// Get the total memory in bytes. If no memory limit is set, it falls back to the host's total memory.
+/// If `sysinfo` does not support the host system, it returns 0.
+pub fn get_total_memory_bytes() -> i64 {
+ // Get memory limit from cgroups filesystem.
+ if let Some(cgroup_memory_limit) = get_memory_limit_from_cgroups() {
+ cgroup_memory_limit
+ } else {
+ // Get total memory from host system.
+ if sysinfo::IS_SUPPORTED_SYSTEM {
+ let mut sys_info = System::new();
+ sys_info.refresh_memory();
+ sys_info.total_memory() as i64
+ } else {
+ // If the system is not supported, return 0
+ 0
+ }
+ }
+}
+
+/// Get the total CPU cores. The result will be rounded to the nearest integer.
+/// For example, if the total CPU is 1.5 cores(1500 millicores), the result will be 2.
+pub fn get_total_cpu_cores() -> usize {
+ ((get_total_cpu_millicores() as f64) / 1000.0).round() as usize
+}
+
+/// Get the total memory in readable size.
+pub fn get_total_memory_readable() -> Option<ReadableSize> {
+ if get_total_memory_bytes() > 0 {
+ Some(ReadableSize(get_total_memory_bytes() as u64))
+ } else {
+ None
+ }
+}
+
+/// A reference to a `ResourceStat` implementation.
+pub type ResourceStatRef = Arc<dyn ResourceStat + Send + Sync>;
+
+/// A trait for getting resource statistics.
+pub trait ResourceStat {
+ /// Get the total CPU in millicores.
+ fn get_total_cpu_millicores(&self) -> i64;
+ /// Get the total memory in bytes.
+ fn get_total_memory_bytes(&self) -> i64;
+ /// Get the CPU usage in millicores.
+ fn get_cpu_usage_millicores(&self) -> i64;
+ /// Get the memory usage in bytes.
+ fn get_memory_usage_bytes(&self) -> i64;
+}
+
+/// An implementation of the `ResourceStat` trait.
+pub struct ResourceStatImpl {
+ cpu_usage_millicores: Arc<AtomicI64>,
+ last_cpu_usage_usecs: Arc<AtomicI64>,
+ calculate_interval: Duration,
+ handler: Option<JoinHandle<()>>,
+}
+
+impl Default for ResourceStatImpl {
+ fn default() -> Self {
+ Self {
+ cpu_usage_millicores: Arc::new(AtomicI64::new(0)),
+ last_cpu_usage_usecs: Arc::new(AtomicI64::new(0)),
+ calculate_interval: Duration::from_secs(5),
+ handler: None,
+ }
+ }
+}
+
+impl ResourceStatImpl {
+ /// Start collecting CPU usage periodically. It calculates the CPU usage in millicores from the rate of change of `usage_usec` in `/sys/fs/cgroup/cpu.stat`.
+ /// It ONLY works in cgroup v2 environment.
+ pub fn start_collect_cpu_usage(&mut self) {
+ if self.handler.is_some() {
+ return;
+ }
+
+ let cpu_usage_millicores = self.cpu_usage_millicores.clone();
+ let last_cpu_usage_usecs = self.last_cpu_usage_usecs.clone();
+ let calculate_interval = self.calculate_interval;
+
+ let handler = common_runtime::spawn_global(async move {
+ info!(
+ "Starting to collect CPU usage periodically for every {} seconds",
+ calculate_interval.as_secs()
+ );
+ loop {
+ let current_cpu_usage_usecs = get_cpu_usage_from_cgroups();
+ if let Some(current_cpu_usage_usecs) = current_cpu_usage_usecs {
+ // Skip the first reading; two samples are needed to compute a rate.
+ if last_cpu_usage_usecs.load(Ordering::Relaxed) == 0 {
+ last_cpu_usage_usecs.store(current_cpu_usage_usecs, Ordering::Relaxed);
+ continue;
+ }
+ let cpu_usage = calculate_cpu_usage(
+ current_cpu_usage_usecs,
+ last_cpu_usage_usecs.load(Ordering::Relaxed),
+ calculate_interval.as_millis() as i64,
+ );
+ cpu_usage_millicores.store(cpu_usage, Ordering::Relaxed);
+ last_cpu_usage_usecs.store(current_cpu_usage_usecs, Ordering::Relaxed);
+ }
+ sleep(calculate_interval).await;
+ }
+ });
+
+ self.handler = Some(handler);
+ }
+}
+
+impl ResourceStat for ResourceStatImpl {
+ /// Get the total CPU in millicores.
+ fn get_total_cpu_millicores(&self) -> i64 {
+ get_total_cpu_millicores()
+ }
+
+ /// Get the total memory in bytes.
+ fn get_total_memory_bytes(&self) -> i64 {
+ get_total_memory_bytes()
+ }
+
+ /// Get the CPU usage in millicores.
+ fn get_cpu_usage_millicores(&self) -> i64 {
+ self.cpu_usage_millicores.load(Ordering::Relaxed)
+ }
+
+ /// Get the memory usage in bytes.
+ /// It ONLY works in cgroup v2 environment.
+ fn get_memory_usage_bytes(&self) -> i64 {
+ get_memory_usage_from_cgroups().unwrap_or_default()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_get_total_cpu_cores() {
+ assert!(get_total_cpu_cores() > 0);
+ }
+
+ #[test]
+ fn test_get_total_memory_readable() {
+ assert!(get_total_memory_readable().unwrap() > ReadableSize::mb(0));
+ }
+}
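
For context, a hedged sketch of how a caller wires this up, mirroring the datanode changes below. It assumes the `common-stat` crate as defined in this diff and an already-initialized `common_runtime` (which `start_collect_cpu_usage` needs for `spawn_global`):

```rust
use std::sync::Arc;

use common_stat::{ResourceStat, ResourceStatImpl, ResourceStatRef};

async fn report_usage() {
    // Start the background sampler once; repeated calls are no-ops because
    // `start_collect_cpu_usage` bails out when `self.handler` is already set.
    let mut stat = ResourceStatImpl::default();
    stat.start_collect_cpu_usage();
    let stat: ResourceStatRef = Arc::new(stat);

    // Totals are computed on demand; usage values come from the sampler
    // (and stay 0 outside a cgroup v2 environment).
    println!(
        "cpu: {}/{} millicores, mem: {}/{} bytes",
        stat.get_cpu_usage_millicores(),
        stat.get_total_cpu_millicores(),
        stat.get_memory_usage_bytes(),
        stat.get_total_memory_bytes(),
    );
}
```
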
diff --git a/src/common/test-util/src/recordbatch.rs b/src/common/test-util/src/recordbatch.rs
index eb666e167a..aa68f79356 100644
--- a/src/common/test-util/src/recordbatch.rs
+++ b/src/common/test-util/src/recordbatch.rs
@@ -28,7 +28,7 @@ pub async fn check_output_stream(output: OutputData, expected: &str) {
_ => unreachable!(),
};
let pretty_print = recordbatches.pretty_print().unwrap();
- assert_eq!(pretty_print, expected, "actual: \n{}", pretty_print);
+ assert_eq!(pretty_print, expected.trim(), "actual: \n{}", pretty_print);
}
pub async fn execute_and_check_output(db: &Database, sql: &str, expected: ExpectedOutput<'_>) {
diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml
index 3dcffd0ac9..265ede339e 100644
--- a/src/datanode/Cargo.toml
+++ b/src/datanode/Cargo.toml
@@ -30,6 +30,7 @@ common-procedure.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-runtime.workspace = true
+common-stat.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
common-version.workspace = true
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index ed8b41f0c7..b9b8edcdba 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -27,6 +27,7 @@ use common_meta::key::runtime_switch::RuntimeSwitchManager;
use common_meta::key::{SchemaMetadataManager, SchemaMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
pub use common_procedure::options::ProcedureConfig;
+use common_stat::ResourceStatImpl;
use common_telemetry::{error, info, warn};
use common_wal::config::DatanodeWalConfig;
use common_wal::config::kafka::DatanodeKafkaConfig;
@@ -282,6 +283,9 @@ impl DatanodeBuilder {
open_all_regions.await?;
}
+ let mut resource_stat = ResourceStatImpl::default();
+ resource_stat.start_collect_cpu_usage();
+
let heartbeat_task = if let Some(meta_client) = meta_client {
Some(
HeartbeatTask::try_new(
@@ -290,6 +294,7 @@ impl DatanodeBuilder {
meta_client,
cache_registry,
self.plugins.clone(),
+ Arc::new(resource_stat),
)
.await?,
)
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
index 9c059e5698..607e031b43 100644
--- a/src/datanode/src/heartbeat.rs
+++ b/src/datanode/src/heartbeat.rs
@@ -20,7 +20,6 @@ use std::time::Duration;
use api::v1::meta::heartbeat_request::NodeWorkloads;
use api::v1::meta::{DatanodeWorkloads, HeartbeatRequest, NodeInfo, Peer, RegionRole, RegionStat};
use common_base::Plugins;
-use common_config::utils::ResourceSpec;
use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::datanode::REGION_STATISTIC_KEY;
use common_meta::distributed_time_constants::META_KEEP_ALIVE_INTERVAL_SECS;
@@ -31,6 +30,7 @@ use common_meta::heartbeat::handler::{
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef};
use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message;
+use common_stat::ResourceStatRef;
use common_telemetry::{debug, error, info, trace, warn};
use common_workload::DatanodeWorkloadType;
use meta_client::MetaClientRef;
@@ -63,7 +63,7 @@ pub struct HeartbeatTask {
interval: u64,
resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
region_alive_keeper: Arc,
- resource_spec: ResourceSpec,
+ resource_stat: ResourceStatRef,
}
impl Drop for HeartbeatTask {
@@ -80,6 +80,7 @@ impl HeartbeatTask {
meta_client: MetaClientRef,
cache_invalidator: CacheInvalidatorRef,
plugins: Plugins,
+ resource_stat: ResourceStatRef,
) -> Result<Self> {
let countdown_task_handler_ext = plugins.get::<CountdownTaskHandlerExtRef>();
let region_alive_keeper = Arc::new(RegionAliveKeeper::new(
@@ -109,7 +110,7 @@ impl HeartbeatTask {
interval: opts.heartbeat.interval.as_millis() as u64,
resp_handler_executor,
region_alive_keeper,
- resource_spec: Default::default(),
+ resource_stat,
})
}
@@ -186,6 +187,7 @@ impl HeartbeatTask {
.context(error::HandleHeartbeatResponseSnafu)
}
+ #[allow(deprecated)]
/// Start heartbeat task, spawn background task.
pub async fn start(
&self,
@@ -237,8 +239,9 @@ impl HeartbeatTask {
self.region_alive_keeper.start(Some(event_receiver)).await?;
let mut last_sent = Instant::now();
- let cpus = self.resource_spec.cpus as u32;
- let memory_bytes = self.resource_spec.memory.unwrap_or_default().as_bytes();
+ let total_cpu_millicores = self.resource_stat.get_total_cpu_millicores();
+ let total_memory_bytes = self.resource_stat.get_total_memory_bytes();
+ let resource_stat = self.resource_stat.clone();
common_runtime::spawn_hb(async move {
let sleep = tokio::time::sleep(Duration::from_millis(0));
@@ -252,8 +255,13 @@ impl HeartbeatTask {
version: build_info.version.to_string(),
git_commit: build_info.commit_short.to_string(),
start_time_ms: node_epoch,
- cpus,
- memory_bytes,
+ total_cpu_millicores,
+ total_memory_bytes,
+ cpu_usage_millicores: 0,
+ memory_usage_bytes: 0,
+ // TODO(zyy17): Remove these deprecated fields when the deprecated fields are removed from the proto.
+ cpus: total_cpu_millicores as u32,
+ memory_bytes: total_memory_bytes as u64,
hostname: hostname::get()
.unwrap_or_default()
.to_string_lossy()
@@ -297,12 +305,18 @@ impl HeartbeatTask {
let topic_stats = region_server_clone.topic_stats();
let now = Instant::now();
let duration_since_epoch = (now - epoch).as_millis() as u64;
- let req = HeartbeatRequest {
+ let mut req = HeartbeatRequest {
region_stats,
topic_stats,
duration_since_epoch,
..heartbeat_request.clone()
};
+
+ if let Some(info) = req.info.as_mut() {
+ info.cpu_usage_millicores = resource_stat.get_cpu_usage_millicores();
+ info.memory_usage_bytes = resource_stat.get_memory_usage_bytes();
+ }
+
sleep.as_mut().reset(now + Duration::from_millis(interval));
Some(req)
}
diff --git a/src/datanode/src/heartbeat/handler.rs b/src/datanode/src/heartbeat/handler.rs
index 14a671a14b..71b3181a04 100644
--- a/src/datanode/src/heartbeat/handler.rs
+++ b/src/datanode/src/heartbeat/handler.rs
@@ -13,16 +13,13 @@
// limitations under the License.
use async_trait::async_trait;
-use common_meta::RegionIdent;
use common_meta::error::{InvalidHeartbeatResponseSnafu, Result as MetaResult};
use common_meta::heartbeat::handler::{
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
use common_meta::instruction::{Instruction, InstructionReply};
use common_telemetry::error;
-use futures::future::BoxFuture;
use snafu::OptionExt;
-use store_api::storage::RegionId;
mod close_region;
mod downgrade_region;
@@ -30,10 +27,15 @@ mod flush_region;
mod open_region;
mod upgrade_region;
+use crate::heartbeat::handler::close_region::CloseRegionsHandler;
+use crate::heartbeat::handler::downgrade_region::DowngradeRegionsHandler;
+use crate::heartbeat::handler::flush_region::FlushRegionsHandler;
+use crate::heartbeat::handler::open_region::OpenRegionsHandler;
+use crate::heartbeat::handler::upgrade_region::UpgradeRegionsHandler;
use crate::heartbeat::task_tracker::TaskTracker;
use crate::region_server::RegionServer;
-/// Handler for [Instruction::OpenRegion] and [Instruction::CloseRegion].
+/// The handler for [`Instruction`]s.
#[derive(Clone)]
pub struct RegionHeartbeatResponseHandler {
region_server: RegionServer,
@@ -43,9 +45,14 @@ pub struct RegionHeartbeatResponseHandler {
open_region_parallelism: usize,
}
-/// Handler of the instruction.
-pub type InstructionHandler =
- Box<dyn FnOnce(HandlerContext) -> BoxFuture<'static, Option<InstructionReply>> + Send>;
+#[async_trait::async_trait]
+pub trait InstructionHandler: Send + Sync {
+ async fn handle(
+ &self,
+ ctx: &HandlerContext,
+ instruction: Instruction,
+ ) -> Option<InstructionReply>;
+}
#[derive(Clone)]
pub struct HandlerContext {
@@ -56,10 +63,6 @@ pub struct HandlerContext {
}
impl HandlerContext {
- fn region_ident_to_region_id(region_ident: &RegionIdent) -> RegionId {
- RegionId::new(region_ident.table_id, region_ident.region_number)
- }
-
#[cfg(test)]
pub fn new_for_test(region_server: RegionServer) -> Self {
Self {
@@ -90,31 +93,16 @@ impl RegionHeartbeatResponseHandler {
self
}
- /// Builds the [InstructionHandler].
- fn build_handler(&self, instruction: Instruction) -> MetaResult<InstructionHandler> {
+ fn build_handler(&self, instruction: &Instruction) -> MetaResult<Box<dyn InstructionHandler>> {
match instruction {
- Instruction::OpenRegions(open_regions) => {
- let open_region_parallelism = self.open_region_parallelism;
- Ok(Box::new(move |handler_context| {
- handler_context
- .handle_open_regions_instruction(open_regions, open_region_parallelism)
- }))
- }
- Instruction::CloseRegions(close_regions) => Ok(Box::new(move |handler_context| {
- handler_context.handle_close_regions_instruction(close_regions)
- })),
- Instruction::DowngradeRegion(downgrade_region) => {
- Ok(Box::new(move |handler_context| {
- handler_context.handle_downgrade_region_instruction(downgrade_region)
- }))
- }
- Instruction::UpgradeRegion(upgrade_region) => Ok(Box::new(move |handler_context| {
- handler_context.handle_upgrade_region_instruction(upgrade_region)
+ Instruction::CloseRegions(_) => Ok(Box::new(CloseRegionsHandler)),
+ Instruction::OpenRegions(_) => Ok(Box::new(OpenRegionsHandler {
+ open_region_parallelism: self.open_region_parallelism,
})),
+ Instruction::FlushRegions(_) => Ok(Box::new(FlushRegionsHandler)),
+ Instruction::DowngradeRegions(_) => Ok(Box::new(DowngradeRegionsHandler)),
+ Instruction::UpgradeRegion(_) => Ok(Box::new(UpgradeRegionsHandler)),
Instruction::InvalidateCaches(_) => InvalidHeartbeatResponseSnafu.fail(),
- Instruction::FlushRegions(flush_regions) => Ok(Box::new(move |handler_context| {
- handler_context.handle_flush_regions_instruction(flush_regions)
- })),
}
}
}
@@ -124,7 +112,7 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
matches!(ctx.incoming_message.as_ref(), |Some((
_,
- Instruction::DowngradeRegion { .. },
+ Instruction::DowngradeRegions { .. },
))| Some((
_,
Instruction::UpgradeRegion { .. }
@@ -151,15 +139,19 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
let catchup_tasks = self.catchup_tasks.clone();
let downgrade_tasks = self.downgrade_tasks.clone();
let flush_tasks = self.flush_tasks.clone();
- let handler = self.build_handler(instruction)?;
+ let handler = self.build_handler(&instruction)?;
let _handle = common_runtime::spawn_global(async move {
- let reply = handler(HandlerContext {
- region_server,
- catchup_tasks,
- downgrade_tasks,
- flush_tasks,
- })
- .await;
+ let reply = handler
+ .handle(
+ &HandlerContext {
+ region_server,
+ catchup_tasks,
+ downgrade_tasks,
+ flush_tasks,
+ },
+ instruction,
+ )
+ .await;
if let Some(reply) = reply
&& let Err(e) = mailbox.send((meta, reply)).await
@@ -179,6 +171,7 @@ mod tests {
use std::sync::Arc;
use std::time::Duration;
+ use common_meta::RegionIdent;
use common_meta::heartbeat::mailbox::{
HeartbeatMailbox, IncomingMessage, MailboxRef, MessageMeta,
};
@@ -249,10 +242,10 @@ mod tests {
);
// Downgrade region
- let instruction = Instruction::DowngradeRegion(DowngradeRegion {
+ let instruction = Instruction::DowngradeRegions(vec![DowngradeRegion {
region_id: RegionId::new(2048, 1),
flush_timeout: Some(Duration::from_secs(1)),
- });
+ }]);
assert!(
heartbeat_handler
.is_acceptable(&heartbeat_env.create_handler_ctx((meta.clone(), instruction)))
@@ -447,10 +440,10 @@ mod tests {
// Should be ok, if we try to downgrade it twice.
for _ in 0..2 {
let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
- let instruction = Instruction::DowngradeRegion(DowngradeRegion {
+ let instruction = Instruction::DowngradeRegions(vec![DowngradeRegion {
region_id,
flush_timeout: Some(Duration::from_secs(1)),
- });
+ }]);
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
@@ -458,33 +451,27 @@ mod tests {
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
- if let InstructionReply::DowngradeRegion(reply) = reply {
- assert!(reply.exists);
- assert!(reply.error.is_none());
- assert_eq!(reply.last_entry_id.unwrap(), 0);
- } else {
- unreachable!()
- }
+ let reply = &reply.expect_downgrade_regions_reply()[0];
+ assert!(reply.exists);
+ assert!(reply.error.is_none());
+ assert_eq!(reply.last_entry_id.unwrap(), 0);
}
// Downgrades a not exists region.
let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
- let instruction = Instruction::DowngradeRegion(DowngradeRegion {
+ let instruction = Instruction::DowngradeRegions(vec![DowngradeRegion {
region_id: RegionId::new(2048, 1),
flush_timeout: Some(Duration::from_secs(1)),
- });
+ }]);
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Continue);
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
- if let InstructionReply::DowngradeRegion(reply) = reply {
- assert!(!reply.exists);
- assert!(reply.error.is_none());
- assert!(reply.last_entry_id.is_none());
- } else {
- unreachable!()
- }
+ let reply = reply.expect_downgrade_regions_reply();
+ assert!(!reply[0].exists);
+ assert!(reply[0].error.is_none());
+ assert!(reply[0].last_entry_id.is_none());
}
}
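
The refactor above swaps per-instruction boxed closures for trait-object dispatch: `build_handler` selects a stateless handler by variant, and each handler re-extracts its payload through the `into_*` accessors added in `instruction.rs`. A self-contained sketch of the shape, with toy types and hypothetical names (it assumes the `async-trait` and `tokio` crates):

```rust
// Toy model of the dispatch pattern; none of these names are the real API.
#[derive(Debug)]
enum Instr {
    Open(Vec<u64>),
    Close(Vec<u64>),
}

impl Instr {
    fn into_close(self) -> Option<Vec<u64>> {
        match self {
            Self::Close(ids) => Some(ids),
            _ => None,
        }
    }
}

#[async_trait::async_trait]
trait Handler: Send + Sync {
    async fn handle(&self, instr: Instr) -> String;
}

struct CloseHandler;

#[async_trait::async_trait]
impl Handler for CloseHandler {
    async fn handle(&self, instr: Instr) -> String {
        // Safety: `build_handler` only routes `Instr::Close` here.
        let ids = instr.into_close().unwrap();
        format!("closed {} regions", ids.len())
    }
}

fn build_handler(instr: &Instr) -> Box<dyn Handler> {
    match instr {
        Instr::Close(_) => Box::new(CloseHandler),
        Instr::Open(_) => unimplemented!("other handlers elided from this sketch"),
    }
}

#[tokio::main]
async fn main() {
    let instr = Instr::Close(vec![1, 2, 3]);
    let handler = build_handler(&instr);
    assert_eq!(handler.handle(instr).await, "closed 3 regions");
}
```

Matching on the borrowed instruction and handing the owned value to the handler keeps `build_handler` infallible for known variants while avoiding a clone, which is the same trade the diff makes with its `unwrap()`-guarded `into_*` calls.
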
diff --git a/src/datanode/src/heartbeat/handler/close_region.rs b/src/datanode/src/heartbeat/handler/close_region.rs
index c942642731..88ed043fab 100644
--- a/src/datanode/src/heartbeat/handler/close_region.rs
+++ b/src/datanode/src/heartbeat/handler/close_region.rs
@@ -12,60 +12,64 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use common_meta::RegionIdent;
-use common_meta::instruction::{InstructionReply, SimpleReply};
+use common_meta::instruction::{Instruction, InstructionReply, SimpleReply};
use common_telemetry::warn;
use futures::future::join_all;
-use futures_util::future::BoxFuture;
use store_api::region_request::{RegionCloseRequest, RegionRequest};
+use store_api::storage::RegionId;
use crate::error;
-use crate::heartbeat::handler::HandlerContext;
+use crate::heartbeat::handler::{HandlerContext, InstructionHandler};
-impl HandlerContext {
- pub(crate) fn handle_close_regions_instruction(
- self,
- region_idents: Vec<RegionIdent>,
- ) -> BoxFuture<'static, Option> {
- Box::pin(async move {
- let region_ids = region_idents
- .into_iter()
- .map(|region_ident| Self::region_ident_to_region_id(®ion_ident))
- .collect::<Vec<_>>();
+#[derive(Debug, Clone, Copy, Default)]
+pub struct CloseRegionsHandler;
- let futs = region_ids.iter().map(|region_id| {
- self.region_server
- .handle_request(*region_id, RegionRequest::Close(RegionCloseRequest {}))
- });
+#[async_trait::async_trait]
+impl InstructionHandler for CloseRegionsHandler {
+ async fn handle(
+ &self,
+ ctx: &HandlerContext,
+ instruction: Instruction,
+ ) -> Option<InstructionReply> {
+ // Safety: must be an `Instruction::CloseRegions` instruction.
+ let region_idents = instruction.into_close_regions().unwrap();
+ let region_ids = region_idents
+ .into_iter()
+ .map(|region_ident| RegionId::new(region_ident.table_id, region_ident.region_number))
+ .collect::<Vec<_>>();
- let results = join_all(futs).await;
+ let futs = region_ids.iter().map(|region_id| {
+ ctx.region_server
+ .handle_request(*region_id, RegionRequest::Close(RegionCloseRequest {}))
+ });
- let mut errors = vec![];
- for (region_id, result) in region_ids.into_iter().zip(results.into_iter()) {
- match result {
- Ok(_) => (),
- Err(error::Error::RegionNotFound { .. }) => {
- warn!(
- "Received a close regions instruction from meta, but target region:{} is not found.",
- region_id
- );
- }
- Err(err) => errors.push(format!("region:{region_id}: {err:?}")),
+ let results = join_all(futs).await;
+
+ let mut errors = vec![];
+ for (region_id, result) in region_ids.into_iter().zip(results.into_iter()) {
+ match result {
+ Ok(_) => (),
+ Err(error::Error::RegionNotFound { .. }) => {
+ warn!(
+ "Received a close regions instruction from meta, but target region:{} is not found.",
+ region_id
+ );
}
+ Err(err) => errors.push(format!("region:{region_id}: {err:?}")),
}
+ }
- if errors.is_empty() {
- return Some(InstructionReply::CloseRegions(SimpleReply {
- result: true,
- error: None,
- }));
- }
+ if errors.is_empty() {
+ return Some(InstructionReply::CloseRegions(SimpleReply {
+ result: true,
+ error: None,
+ }));
+ }
- Some(InstructionReply::CloseRegions(SimpleReply {
- result: false,
- error: Some(errors.join("; ")),
- }))
- })
+ Some(InstructionReply::CloseRegions(SimpleReply {
+ result: false,
+ error: Some(errors.join("; ")),
+ }))
}
}
diff --git a/src/datanode/src/heartbeat/handler/downgrade_region.rs b/src/datanode/src/heartbeat/handler/downgrade_region.rs
index 06d3ab046e..91ceddb91a 100644
--- a/src/datanode/src/heartbeat/handler/downgrade_region.rs
+++ b/src/datanode/src/heartbeat/handler/downgrade_region.rs
@@ -12,209 +12,242 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use common_meta::instruction::{DowngradeRegion, DowngradeRegionReply, InstructionReply};
+use common_meta::instruction::{
+ DowngradeRegion, DowngradeRegionReply, DowngradeRegionsReply, Instruction, InstructionReply,
+};
use common_telemetry::tracing::info;
use common_telemetry::{error, warn};
-use futures_util::future::BoxFuture;
+use futures::future::join_all;
use store_api::region_engine::{SetRegionRoleStateResponse, SettableRegionRoleState};
use store_api::region_request::{RegionFlushRequest, RegionRequest};
use store_api::storage::RegionId;
-use crate::heartbeat::handler::HandlerContext;
+use crate::heartbeat::handler::{HandlerContext, InstructionHandler};
use crate::heartbeat::task_tracker::WaitResult;
-impl HandlerContext {
- async fn downgrade_to_follower_gracefully(
+#[derive(Debug, Clone, Copy, Default)]
+pub struct DowngradeRegionsHandler;
+
+impl DowngradeRegionsHandler {
+ async fn handle_downgrade_region(
+ ctx: &HandlerContext,
+ DowngradeRegion {
+ region_id,
+ flush_timeout,
+ }: DowngradeRegion,
+ ) -> DowngradeRegionReply {
+ let Some(writable) = ctx.region_server.is_region_leader(region_id) else {
+ warn!("Region: {region_id} is not found");
+ return DowngradeRegionReply {
+ region_id,
+ last_entry_id: None,
+ metadata_last_entry_id: None,
+ exists: false,
+ error: None,
+ };
+ };
+
+ let region_server_moved = ctx.region_server.clone();
+
+ // Ignores flush request
+ if !writable {
+ warn!(
+ "Region: {region_id} is not writable, flush_timeout: {:?}",
+ flush_timeout
+ );
+ return ctx.downgrade_to_follower_gracefully(region_id).await;
+ }
+
+ // If flush_timeout is not set, directly convert region to follower.
+ let Some(flush_timeout) = flush_timeout else {
+ return ctx.downgrade_to_follower_gracefully(region_id).await;
+ };
+
+ // Sets region to downgrading,
+ // the downgrading region will reject all write requests.
+ // However, the downgrading region will still accept read, flush requests.
+ match ctx
+ .region_server
+ .set_region_role_state_gracefully(region_id, SettableRegionRoleState::DowngradingLeader)
+ .await
+ {
+ Ok(SetRegionRoleStateResponse::Success { .. }) => {}
+ Ok(SetRegionRoleStateResponse::NotFound) => {
+ warn!("Region: {region_id} is not found");
+ return DowngradeRegionReply {
+ region_id,
+ last_entry_id: None,
+ metadata_last_entry_id: None,
+ exists: false,
+ error: None,
+ };
+ }
+ Ok(SetRegionRoleStateResponse::InvalidTransition(err)) => {
+ error!(err; "Failed to convert region to downgrading leader - invalid transition");
+ return DowngradeRegionReply {
+ region_id,
+ last_entry_id: None,
+ metadata_last_entry_id: None,
+ exists: true,
+ error: Some(format!("{err:?}")),
+ };
+ }
+ Err(err) => {
+ error!(err; "Failed to convert region to downgrading leader");
+ return DowngradeRegionReply {
+ region_id,
+ last_entry_id: None,
+ metadata_last_entry_id: None,
+ exists: true,
+ error: Some(format!("{err:?}")),
+ };
+ }
+ }
+
+ let register_result = ctx
+ .downgrade_tasks
+ .try_register(
+ region_id,
+ Box::pin(async move {
+ info!("Flush region: {region_id} before converting region to follower");
+ region_server_moved
+ .handle_request(
+ region_id,
+ RegionRequest::Flush(RegionFlushRequest {
+ row_group_size: None,
+ }),
+ )
+ .await?;
+
+ Ok(())
+ }),
+ )
+ .await;
+
+ if register_result.is_busy() {
+ warn!("Another flush task is running for the region: {region_id}");
+ }
+
+ let mut watcher = register_result.into_watcher();
+ let result = ctx.downgrade_tasks.wait(&mut watcher, flush_timeout).await;
+
+ match result {
+ WaitResult::Timeout => DowngradeRegionReply {
+ region_id,
+ last_entry_id: None,
+ metadata_last_entry_id: None,
+ exists: true,
+ error: Some(format!(
+ "Flush region timeout, region: {region_id}, timeout: {:?}",
+ flush_timeout
+ )),
+ },
+ WaitResult::Finish(Ok(_)) => ctx.downgrade_to_follower_gracefully(region_id).await,
+ WaitResult::Finish(Err(err)) => DowngradeRegionReply {
+ region_id,
+ last_entry_id: None,
+ metadata_last_entry_id: None,
+ exists: true,
+ error: Some(format!("{err:?}")),
+ },
+ }
+ }
+}
+
+#[async_trait::async_trait]
+impl InstructionHandler for DowngradeRegionsHandler {
+ async fn handle(
&self,
- region_id: RegionId,
+ ctx: &HandlerContext,
+ instruction: Instruction,
+ ) -> Option<InstructionReply> {
+ // Safety: must be an `Instruction::DowngradeRegions` instruction.
+ let downgrade_regions = instruction.into_downgrade_regions().unwrap();
+ let futures = downgrade_regions
+ .into_iter()
+ .map(|downgrade_region| Self::handle_downgrade_region(ctx, downgrade_region));
+ // Join all futures; parallelism is governed by the underlying flush scheduler.
+ let results = join_all(futures).await;
+
+ Some(InstructionReply::DowngradeRegions(
+ DowngradeRegionsReply::new(results),
+ ))
+ }
+}
+
+impl HandlerContext {
+ async fn downgrade_to_follower_gracefully(&self, region_id: RegionId) -> DowngradeRegionReply {
match self
.region_server
.set_region_role_state_gracefully(region_id, SettableRegionRoleState::Follower)
.await
{
- Ok(SetRegionRoleStateResponse::Success(success)) => {
- Some(InstructionReply::DowngradeRegion(DowngradeRegionReply {
- last_entry_id: success.last_entry_id(),
- metadata_last_entry_id: success.metadata_last_entry_id(),
- exists: true,
- error: None,
- }))
- }
+ Ok(SetRegionRoleStateResponse::Success(success)) => DowngradeRegionReply {
+ region_id,
+ last_entry_id: success.last_entry_id(),
+ metadata_last_entry_id: success.metadata_last_entry_id(),
+ exists: true,
+ error: None,
+ },
Ok(SetRegionRoleStateResponse::NotFound) => {
warn!("Region: {region_id} is not found");
- Some(InstructionReply::DowngradeRegion(DowngradeRegionReply {
+ DowngradeRegionReply {
+ region_id,
last_entry_id: None,
metadata_last_entry_id: None,
exists: false,
error: None,
- }))
+ }
}
Ok(SetRegionRoleStateResponse::InvalidTransition(err)) => {
error!(err; "Failed to convert region to follower - invalid transition");
- Some(InstructionReply::DowngradeRegion(DowngradeRegionReply {
+ DowngradeRegionReply {
+ region_id,
last_entry_id: None,
metadata_last_entry_id: None,
exists: true,
error: Some(format!("{err:?}")),
- }))
+ }
}
Err(err) => {
error!(err; "Failed to convert region to {}", SettableRegionRoleState::Follower);
- Some(InstructionReply::DowngradeRegion(DowngradeRegionReply {
+ DowngradeRegionReply {
+ region_id,
last_entry_id: None,
metadata_last_entry_id: None,
exists: true,
error: Some(format!("{err:?}")),
- }))
+ }
}
}
}
-
- pub(crate) fn handle_downgrade_region_instruction(
- self,
- DowngradeRegion {
- region_id,
- flush_timeout,
- }: DowngradeRegion,
- ) -> BoxFuture<'static, Option> {
- Box::pin(async move {
- let Some(writable) = self.region_server.is_region_leader(region_id) else {
- warn!("Region: {region_id} is not found");
- return Some(InstructionReply::DowngradeRegion(DowngradeRegionReply {
- last_entry_id: None,
- metadata_last_entry_id: None,
- exists: false,
- error: None,
- }));
- };
-
- let region_server_moved = self.region_server.clone();
-
- // Ignores flush request
- if !writable {
- warn!(
- "Region: {region_id} is not writable, flush_timeout: {:?}",
- flush_timeout
- );
- return self.downgrade_to_follower_gracefully(region_id).await;
- }
-
- // If flush_timeout is not set, directly convert region to follower.
- let Some(flush_timeout) = flush_timeout else {
- return self.downgrade_to_follower_gracefully(region_id).await;
- };
-
- // Sets region to downgrading,
- // the downgrading region will reject all write requests.
- // However, the downgrading region will still accept read, flush requests.
- match self
- .region_server
- .set_region_role_state_gracefully(
- region_id,
- SettableRegionRoleState::DowngradingLeader,
- )
- .await
- {
- Ok(SetRegionRoleStateResponse::Success { .. }) => {}
- Ok(SetRegionRoleStateResponse::NotFound) => {
- warn!("Region: {region_id} is not found");
- return Some(InstructionReply::DowngradeRegion(DowngradeRegionReply {
- last_entry_id: None,
- metadata_last_entry_id: None,
- exists: false,
- error: None,
- }));
- }
- Ok(SetRegionRoleStateResponse::InvalidTransition(err)) => {
- error!(err; "Failed to convert region to downgrading leader - invalid transition");
- return Some(InstructionReply::DowngradeRegion(DowngradeRegionReply {
- last_entry_id: None,
- metadata_last_entry_id: None,
- exists: true,
- error: Some(format!("{err:?}")),
- }));
- }
- Err(err) => {
- error!(err; "Failed to convert region to downgrading leader");
- return Some(InstructionReply::DowngradeRegion(DowngradeRegionReply {
- last_entry_id: None,
- metadata_last_entry_id: None,
- exists: true,
- error: Some(format!("{err:?}")),
- }));
- }
- }
-
- let register_result = self
- .downgrade_tasks
- .try_register(
- region_id,
- Box::pin(async move {
- info!("Flush region: {region_id} before converting region to follower");
- region_server_moved
- .handle_request(
- region_id,
- RegionRequest::Flush(RegionFlushRequest {
- row_group_size: None,
- }),
- )
- .await?;
-
- Ok(())
- }),
- )
- .await;
-
- if register_result.is_busy() {
- warn!("Another flush task is running for the region: {region_id}");
- }
-
- let mut watcher = register_result.into_watcher();
- let result = self.downgrade_tasks.wait(&mut watcher, flush_timeout).await;
-
- match result {
- WaitResult::Timeout => {
- Some(InstructionReply::DowngradeRegion(DowngradeRegionReply {
- last_entry_id: None,
- metadata_last_entry_id: None,
- exists: true,
- error: Some(format!(
- "Flush region timeout, region: {region_id}, timeout: {:?}",
- flush_timeout
- )),
- }))
- }
- WaitResult::Finish(Ok(_)) => self.downgrade_to_follower_gracefully(region_id).await,
- WaitResult::Finish(Err(err)) => {
- Some(InstructionReply::DowngradeRegion(DowngradeRegionReply {
- last_entry_id: None,
- metadata_last_entry_id: None,
- exists: true,
- error: Some(format!("{err:?}")),
- }))
- }
- }
- })
- }
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
+ use std::sync::Arc;
use std::time::Duration;
- use common_meta::instruction::{DowngradeRegion, InstructionReply};
+ use common_meta::heartbeat::handler::{HandleControl, HeartbeatResponseHandler};
+ use common_meta::heartbeat::mailbox::MessageMeta;
+ use common_meta::instruction::{DowngradeRegion, Instruction};
+ use mito2::config::MitoConfig;
use mito2::engine::MITO_ENGINE_NAME;
+ use mito2::test_util::{CreateRequestBuilder, TestEnv};
use store_api::region_engine::{
- RegionRole, SetRegionRoleStateResponse, SetRegionRoleStateSuccess,
+ RegionEngine, RegionRole, SetRegionRoleStateResponse, SetRegionRoleStateSuccess,
};
use store_api::region_request::RegionRequest;
use store_api::storage::RegionId;
use tokio::time::Instant;
use crate::error;
- use crate::heartbeat::handler::HandlerContext;
+ use crate::heartbeat::handler::downgrade_region::DowngradeRegionsHandler;
+ use crate::heartbeat::handler::tests::HeartbeatResponseTestEnv;
+ use crate::heartbeat::handler::{
+ HandlerContext, InstructionHandler, RegionHeartbeatResponseHandler,
+ };
use crate::tests::{MockRegionEngine, mock_region_server};
#[tokio::test]
@@ -227,20 +260,20 @@ mod tests {
let waits = vec![None, Some(Duration::from_millis(100u64))];
for flush_timeout in waits {
- let reply = handler_context
- .clone()
- .handle_downgrade_region_instruction(DowngradeRegion {
- region_id,
- flush_timeout,
- })
+ let reply = DowngradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::DowngradeRegions(vec![DowngradeRegion {
+ region_id,
+ flush_timeout,
+ }]),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::DowngradeRegion(_)));
- if let InstructionReply::DowngradeRegion(reply) = reply.unwrap() {
- assert!(!reply.exists);
- assert!(reply.error.is_none());
- assert!(reply.last_entry_id.is_none());
- }
+ let reply = &reply.unwrap().expect_downgrade_regions_reply()[0];
+ assert!(!reply.exists);
+ assert!(reply.error.is_none());
+ assert!(reply.last_entry_id.is_none());
}
}
@@ -270,20 +303,20 @@ mod tests {
let waits = vec![None, Some(Duration::from_millis(100u64))];
for flush_timeout in waits {
- let reply = handler_context
- .clone()
- .handle_downgrade_region_instruction(DowngradeRegion {
- region_id,
- flush_timeout,
- })
+ let reply = DowngradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::DowngradeRegions(vec![DowngradeRegion {
+ region_id,
+ flush_timeout,
+ }]),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::DowngradeRegion(_)));
- if let InstructionReply::DowngradeRegion(reply) = reply.unwrap() {
- assert!(reply.exists);
- assert!(reply.error.is_none());
- assert_eq!(reply.last_entry_id.unwrap(), 1024);
- }
+ let reply = &reply.unwrap().expect_downgrade_regions_reply()[0];
+ assert!(reply.exists);
+ assert!(reply.error.is_none());
+ assert_eq!(reply.last_entry_id.unwrap(), 1024);
}
}
@@ -305,20 +338,20 @@ mod tests {
let handler_context = HandlerContext::new_for_test(mock_region_server);
let flush_timeout = Duration::from_millis(100);
- let reply = handler_context
- .clone()
- .handle_downgrade_region_instruction(DowngradeRegion {
- region_id,
- flush_timeout: Some(flush_timeout),
- })
+ let reply = DowngradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::DowngradeRegions(vec![DowngradeRegion {
+ region_id,
+ flush_timeout: Some(flush_timeout),
+ }]),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::DowngradeRegion(_)));
- if let InstructionReply::DowngradeRegion(reply) = reply.unwrap() {
- assert!(reply.exists);
- assert!(reply.error.unwrap().contains("timeout"));
- assert!(reply.last_entry_id.is_none());
- }
+ let reply = &reply.unwrap().expect_downgrade_regions_reply()[0];
+ assert!(reply.exists);
+ assert!(reply.error.as_ref().unwrap().contains("timeout"));
+ assert!(reply.last_entry_id.is_none());
}
#[tokio::test]
@@ -344,36 +377,38 @@ mod tests {
];
for flush_timeout in waits {
- let reply = handler_context
- .clone()
- .handle_downgrade_region_instruction(DowngradeRegion {
- region_id,
- flush_timeout,
- })
+ let reply = DowngradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::DowngradeRegions(vec![DowngradeRegion {
+ region_id,
+ flush_timeout,
+ }]),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::DowngradeRegion(_)));
- if let InstructionReply::DowngradeRegion(reply) = reply.unwrap() {
- assert!(reply.exists);
- assert!(reply.error.unwrap().contains("timeout"));
- assert!(reply.last_entry_id.is_none());
- }
+
+ let reply = &reply.unwrap().expect_downgrade_regions_reply()[0];
+ assert!(reply.exists);
+ assert!(reply.error.as_ref().unwrap().contains("timeout"));
+ assert!(reply.last_entry_id.is_none());
}
let timer = Instant::now();
- let reply = handler_context
- .handle_downgrade_region_instruction(DowngradeRegion {
- region_id,
- flush_timeout: Some(Duration::from_millis(500)),
- })
+ let reply = DowngradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::DowngradeRegions(vec![DowngradeRegion {
+ region_id,
+ flush_timeout: Some(Duration::from_millis(500)),
+ }]),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::DowngradeRegion(_)));
// Must less than 300 ms.
assert!(timer.elapsed().as_millis() < 300);
- if let InstructionReply::DowngradeRegion(reply) = reply.unwrap() {
- assert!(reply.exists);
- assert!(reply.error.is_none());
- assert_eq!(reply.last_entry_id.unwrap(), 1024);
- }
+ let reply = &reply.unwrap().expect_downgrade_regions_reply()[0];
+ assert!(reply.exists);
+ assert!(reply.error.is_none());
+ assert_eq!(reply.last_entry_id.unwrap(), 1024);
}
#[tokio::test]
@@ -405,36 +440,36 @@ mod tests {
];
for flush_timeout in waits {
- let reply = handler_context
- .clone()
- .handle_downgrade_region_instruction(DowngradeRegion {
- region_id,
- flush_timeout,
- })
+ let reply = DowngradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::DowngradeRegions(vec![DowngradeRegion {
+ region_id,
+ flush_timeout,
+ }]),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::DowngradeRegion(_)));
- if let InstructionReply::DowngradeRegion(reply) = reply.unwrap() {
- assert!(reply.exists);
- assert!(reply.error.unwrap().contains("timeout"));
- assert!(reply.last_entry_id.is_none());
- }
- }
- let timer = Instant::now();
- let reply = handler_context
- .handle_downgrade_region_instruction(DowngradeRegion {
- region_id,
- flush_timeout: Some(Duration::from_millis(500)),
- })
- .await;
- assert_matches!(reply, Some(InstructionReply::DowngradeRegion(_)));
- // Must less than 300 ms.
- assert!(timer.elapsed().as_millis() < 300);
-
- if let InstructionReply::DowngradeRegion(reply) = reply.unwrap() {
+ let reply = &reply.unwrap().expect_downgrade_regions_reply()[0];
assert!(reply.exists);
- assert!(reply.error.unwrap().contains("flush failed"));
+ assert!(reply.error.as_ref().unwrap().contains("timeout"));
assert!(reply.last_entry_id.is_none());
}
+ let timer = Instant::now();
+ let reply = DowngradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::DowngradeRegions(vec![DowngradeRegion {
+ region_id,
+ flush_timeout: Some(Duration::from_millis(500)),
+ }]),
+ )
+ .await;
+ // Must less than 300 ms.
+ assert!(timer.elapsed().as_millis() < 300);
+ let reply = &reply.unwrap().expect_downgrade_regions_reply()[0];
+ assert!(reply.exists);
+ assert!(reply.error.as_ref().unwrap().contains("flush failed"));
+ assert!(reply.last_entry_id.is_none());
}
#[tokio::test]
@@ -449,19 +484,19 @@ mod tests {
});
mock_region_server.register_test_region(region_id, mock_engine);
let handler_context = HandlerContext::new_for_test(mock_region_server);
- let reply = handler_context
- .clone()
- .handle_downgrade_region_instruction(DowngradeRegion {
- region_id,
- flush_timeout: None,
- })
+ let reply = DowngradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::DowngradeRegions(vec![DowngradeRegion {
+ region_id,
+ flush_timeout: None,
+ }]),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::DowngradeRegion(_)));
- if let InstructionReply::DowngradeRegion(reply) = reply.unwrap() {
- assert!(!reply.exists);
- assert!(reply.error.is_none());
- assert!(reply.last_entry_id.is_none());
- }
+ let reply = &reply.unwrap().expect_downgrade_regions_reply()[0];
+ assert!(!reply.exists);
+ assert!(reply.error.is_none());
+ assert!(reply.last_entry_id.is_none());
}
#[tokio::test]
@@ -480,23 +515,77 @@ mod tests {
});
mock_region_server.register_test_region(region_id, mock_engine);
let handler_context = HandlerContext::new_for_test(mock_region_server);
- let reply = handler_context
- .clone()
- .handle_downgrade_region_instruction(DowngradeRegion {
- region_id,
- flush_timeout: None,
- })
+ let reply = DowngradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::DowngradeRegions(vec![DowngradeRegion {
+ region_id,
+ flush_timeout: None,
+ }]),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::DowngradeRegion(_)));
- if let InstructionReply::DowngradeRegion(reply) = reply.unwrap() {
- assert!(reply.exists);
- assert!(
- reply
- .error
- .unwrap()
- .contains("Failed to set region to readonly")
- );
- assert!(reply.last_entry_id.is_none());
- }
+ let reply = &reply.unwrap().expect_downgrade_regions_reply()[0];
+ assert!(reply.exists);
+ assert!(
+ reply
+ .error
+ .as_ref()
+ .unwrap()
+ .contains("Failed to set region to readonly")
+ );
+ assert!(reply.last_entry_id.is_none());
+ }
+
+ #[tokio::test]
+ async fn test_downgrade_regions() {
+ common_telemetry::init_default_ut_logging();
+
+ let mut region_server = mock_region_server();
+ let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());
+ let mut engine_env = TestEnv::with_prefix("downgrade-regions").await;
+ let engine = engine_env.create_engine(MitoConfig::default()).await;
+ region_server.register_engine(Arc::new(engine.clone()));
+ let region_id = RegionId::new(1024, 1);
+ let region_id1 = RegionId::new(1024, 2);
+ let builder = CreateRequestBuilder::new();
+ let create_req = builder.build();
+ region_server
+ .handle_request(region_id, RegionRequest::Create(create_req))
+ .await
+ .unwrap();
+ let create_req1 = builder.build();
+ region_server
+ .handle_request(region_id1, RegionRequest::Create(create_req1))
+ .await
+ .unwrap();
+ let meta = MessageMeta::new_test(1, "test", "dn-1", "meta-0");
+ let instruction = Instruction::DowngradeRegions(vec![
+ DowngradeRegion {
+ region_id,
+ flush_timeout: Some(Duration::from_secs(1)),
+ },
+ DowngradeRegion {
+ region_id: region_id1,
+ flush_timeout: Some(Duration::from_secs(1)),
+ },
+ ]);
+ let mut heartbeat_env = HeartbeatResponseTestEnv::new();
+ let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
+ let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
+ assert_matches!(control, HandleControl::Continue);
+
+ let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
+ let reply = reply.expect_downgrade_regions_reply();
+ assert_eq!(reply[0].region_id, region_id);
+ assert!(reply[0].exists);
+ assert!(reply[0].error.is_none());
+ assert_eq!(reply[0].last_entry_id, Some(0));
+ assert_eq!(reply[1].region_id, region_id1);
+ assert!(reply[1].exists);
+ assert!(reply[1].error.is_none());
+ assert_eq!(reply[1].last_entry_id, Some(0));
+
+ assert_eq!(engine.role(region_id).unwrap(), RegionRole::Follower);
+ assert_eq!(engine.role(region_id1).unwrap(), RegionRole::Follower);
}
}
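
Note: the handler objects exercised above and in the files that follow (DowngradeRegionsHandler, FlushRegionsHandler, OpenRegionsHandler, UpgradeRegionsHandler) all implement a shared InstructionHandler trait whose definition lives in src/datanode/src/heartbeat/handler and is not part of the hunks shown here. Judging from the impls in this patch, it presumably looks like the following sketch (the Send + Sync bounds are an assumption):

    #[async_trait::async_trait]
    pub trait InstructionHandler: Send + Sync {
        /// Handles a single heartbeat instruction; returns None when no reply
        /// is expected (e.g. fire-and-forget flush hints).
        async fn handle(
            &self,
            ctx: &HandlerContext,
            instruction: Instruction,
        ) -> Option<InstructionReply>;
    }
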
diff --git a/src/datanode/src/heartbeat/handler/flush_region.rs b/src/datanode/src/heartbeat/handler/flush_region.rs
index 963d3bf488..56b841bf00 100644
--- a/src/datanode/src/heartbeat/handler/flush_region.rs
+++ b/src/datanode/src/heartbeat/handler/flush_region.rs
@@ -15,19 +15,53 @@
use std::time::Instant;
use common_meta::instruction::{
- FlushErrorStrategy, FlushRegionReply, FlushRegions, FlushStrategy, InstructionReply,
+ FlushErrorStrategy, FlushRegionReply, FlushStrategy, Instruction, InstructionReply,
};
use common_telemetry::{debug, warn};
-use futures_util::future::BoxFuture;
use store_api::region_request::{RegionFlushRequest, RegionRequest};
use store_api::storage::RegionId;
-use crate::error::{self, RegionNotFoundSnafu, RegionNotReadySnafu, UnexpectedSnafu};
-use crate::heartbeat::handler::HandlerContext;
+use crate::error::{self, RegionNotFoundSnafu, RegionNotReadySnafu, Result, UnexpectedSnafu};
+use crate::heartbeat::handler::{HandlerContext, InstructionHandler};
+
+pub struct FlushRegionsHandler;
+
+#[async_trait::async_trait]
+impl InstructionHandler for FlushRegionsHandler {
+ async fn handle(
+ &self,
+ ctx: &HandlerContext,
+ instruction: Instruction,
+ ) -> Option<InstructionReply> {
+ let start_time = Instant::now();
+ let flush_regions = instruction.into_flush_regions().unwrap();
+ let strategy = flush_regions.strategy;
+ let region_ids = flush_regions.region_ids;
+ let error_strategy = flush_regions.error_strategy;
+
+ let reply = if matches!(strategy, FlushStrategy::Async) {
+ // Asynchronous hint mode: fire-and-forget, no reply expected
+ ctx.handle_flush_hint(region_ids).await;
+ None
+ } else {
+ // Synchronous mode: return reply with results
+ let reply = ctx.handle_flush_sync(region_ids, error_strategy).await;
+ Some(InstructionReply::FlushRegions(reply))
+ };
+
+ let elapsed = start_time.elapsed();
+ debug!(
+ "FlushRegions strategy: {:?}, elapsed: {:?}, reply: {:?}",
+ strategy, elapsed, reply
+ );
+
+ reply
+ }
+}
impl HandlerContext {
/// Performs the actual region flush operation.
- async fn perform_region_flush(&self, region_id: RegionId) -> Result<(), error::Error> {
+ async fn perform_region_flush(&self, region_id: RegionId) -> Result<()> {
let request = RegionRequest::Flush(RegionFlushRequest {
row_group_size: None,
});
@@ -92,7 +126,7 @@ impl HandlerContext {
}
/// Flushes a single region synchronously with proper error handling.
- async fn flush_single_region_sync(&self, region_id: RegionId) -> Result<(), error::Error> {
+ async fn flush_single_region_sync(&self, region_id: RegionId) -> Result<()> {
// Check if region is leader and writable
let Some(writable) = self.region_server.is_region_leader(region_id) else {
return Err(RegionNotFoundSnafu { region_id }.build());
@@ -135,37 +169,6 @@ impl HandlerContext {
.build()),
}
}
-
- /// Unified handler for FlushRegions with all flush semantics.
- pub(crate) fn handle_flush_regions_instruction(
- self,
- flush_regions: FlushRegions,
- ) -> BoxFuture<'static, Option<InstructionReply>> {
- Box::pin(async move {
- let start_time = Instant::now();
- let strategy = flush_regions.strategy;
- let region_ids = flush_regions.region_ids;
- let error_strategy = flush_regions.error_strategy;
-
- let reply = if matches!(strategy, FlushStrategy::Async) {
- // Asynchronous hint mode: fire-and-forget, no reply expected
- self.handle_flush_hint(region_ids).await;
- None
- } else {
- // Synchronous mode: return reply with results
- let reply = self.handle_flush_sync(region_ids, error_strategy).await;
- Some(InstructionReply::FlushRegions(reply))
- };
-
- let elapsed = start_time.elapsed();
- debug!(
- "FlushRegions strategy: {:?}, elapsed: {:?}, reply: {:?}",
- strategy, elapsed, reply
- );
-
- reply
- })
- }
}
#[cfg(test)]
@@ -201,9 +204,11 @@ mod tests {
// Async hint mode
let flush_instruction = FlushRegions::async_batch(region_ids.clone());
- let reply = handler_context
- .clone()
- .handle_flush_regions_instruction(flush_instruction)
+ let reply = FlushRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::FlushRegions(flush_instruction),
+ )
.await;
assert!(reply.is_none()); // Hint mode returns no reply
assert_eq!(*flushed_region_ids.read().unwrap(), region_ids);
@@ -212,8 +217,11 @@ mod tests {
flushed_region_ids.write().unwrap().clear();
let not_found_region_ids = (0..2).map(|i| RegionId::new(2048, i)).collect::<Vec<_>>();
let flush_instruction = FlushRegions::async_batch(not_found_region_ids);
- let reply = handler_context
- .handle_flush_regions_instruction(flush_instruction)
+ let reply = FlushRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::FlushRegions(flush_instruction),
+ )
.await;
assert!(reply.is_none());
assert!(flushed_region_ids.read().unwrap().is_empty());
@@ -238,20 +246,17 @@ mod tests {
let handler_context = HandlerContext::new_for_test(mock_region_server);
let flush_instruction = FlushRegions::sync_single(region_id);
- let reply = handler_context
- .handle_flush_regions_instruction(flush_instruction)
+ let reply = FlushRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::FlushRegions(flush_instruction),
+ )
.await;
-
- assert!(reply.is_some());
- if let Some(InstructionReply::FlushRegions(flush_reply)) = reply {
- assert!(flush_reply.overall_success);
- assert_eq!(flush_reply.results.len(), 1);
- assert_eq!(flush_reply.results[0].0, region_id);
- assert!(flush_reply.results[0].1.is_ok());
- } else {
- panic!("Expected FlushRegions reply");
- }
-
+ let flush_reply = reply.unwrap().expect_flush_regions_reply();
+ assert!(flush_reply.overall_success);
+ assert_eq!(flush_reply.results.len(), 1);
+ assert_eq!(flush_reply.results[0].0, region_id);
+ assert!(flush_reply.results[0].1.is_ok());
assert_eq!(*flushed_region_ids.read().unwrap(), vec![region_id]);
}
@@ -281,18 +286,16 @@ mod tests {
// Sync batch with fail-fast strategy
let flush_instruction =
FlushRegions::sync_batch(region_ids.clone(), FlushErrorStrategy::FailFast);
- let reply = handler_context
- .handle_flush_regions_instruction(flush_instruction)
+ let reply = FlushRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::FlushRegions(flush_instruction),
+ )
.await;
-
- assert!(reply.is_some());
- if let Some(InstructionReply::FlushRegions(flush_reply)) = reply {
- assert!(!flush_reply.overall_success); // Should fail due to non-existent regions
- // With fail-fast, only process regions until first failure
- assert!(flush_reply.results.len() <= region_ids.len());
- } else {
- panic!("Expected FlushRegions reply");
- }
+ let flush_reply = reply.unwrap().expect_flush_regions_reply();
+ assert!(!flush_reply.overall_success); // Should fail due to non-existent regions
+ // With fail-fast, only regions up to the first failure are processed
+ assert!(flush_reply.results.len() <= region_ids.len());
}
#[tokio::test]
@@ -317,20 +320,18 @@ mod tests {
// Sync batch with try-all strategy
let flush_instruction =
FlushRegions::sync_batch(region_ids.clone(), FlushErrorStrategy::TryAll);
- let reply = handler_context
- .handle_flush_regions_instruction(flush_instruction)
+ let reply = FlushRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::FlushRegions(flush_instruction),
+ )
.await;
-
- assert!(reply.is_some());
- if let Some(InstructionReply::FlushRegions(flush_reply)) = reply {
- assert!(!flush_reply.overall_success); // Should fail due to one non-existent region
- // With try-all, should process all regions
- assert_eq!(flush_reply.results.len(), region_ids.len());
- // First should succeed, second should fail
- assert!(flush_reply.results[0].1.is_ok());
- assert!(flush_reply.results[1].1.is_err());
- } else {
- panic!("Expected FlushRegions reply");
- }
+ let flush_reply = reply.unwrap().expect_flush_regions_reply();
+ assert!(!flush_reply.overall_success); // Should fail due to one non-existent region
+ // With try-all, should process all regions
+ assert_eq!(flush_reply.results.len(), region_ids.len());
+ // First should succeed, second should fail
+ assert!(flush_reply.results[0].1.is_ok());
+ assert!(flush_reply.results[1].1.is_err());
}
}
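
The two sync-batch tests above pin down the error-strategy contract: fail-fast stops at the first failure, while try-all attempts every region and records each result. A standalone model of that contract (FlushErrorStrategy here is a local stand-in for the common_meta enum, and the flush closure replaces the real region server):

    #[derive(Clone, Copy)]
    enum FlushErrorStrategy {
        FailFast,
        TryAll,
    }

    fn flush_batch(
        region_ids: &[u64],
        strategy: FlushErrorStrategy,
        flush: impl Fn(u64) -> Result<(), String>,
    ) -> (bool, Vec<(u64, Result<(), String>)>) {
        let mut results = Vec::new();
        let mut overall_success = true;
        for &id in region_ids {
            let res = flush(id);
            let failed = res.is_err();
            results.push((id, res));
            if failed {
                overall_success = false;
                if matches!(strategy, FlushErrorStrategy::FailFast) {
                    break; // fail-fast: stop at the first error
                }
            }
        }
        (overall_success, results)
    }

    fn main() {
        let flush = |id: u64| {
            if id == 2 {
                Err("region not found".to_string())
            } else {
                Ok(())
            }
        };
        let (ok, results) = flush_batch(&[1, 2, 3], FlushErrorStrategy::FailFast, &flush);
        assert!(!ok && results.len() <= 3); // stopped at the first failure
        let (ok, results) = flush_batch(&[1, 2, 3], FlushErrorStrategy::TryAll, &flush);
        assert!(!ok && results.len() == 3); // every region attempted
    }
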
diff --git a/src/datanode/src/heartbeat/handler/open_region.rs b/src/datanode/src/heartbeat/handler/open_region.rs
index e6ea973eec..77cd4fe6a0 100644
--- a/src/datanode/src/heartbeat/handler/open_region.rs
+++ b/src/datanode/src/heartbeat/handler/open_region.rs
@@ -12,56 +12,62 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use common_meta::instruction::{InstructionReply, OpenRegion, SimpleReply};
+use common_meta::instruction::{Instruction, InstructionReply, OpenRegion, SimpleReply};
use common_meta::wal_options_allocator::prepare_wal_options;
-use futures_util::future::BoxFuture;
use store_api::path_utils::table_dir;
use store_api::region_request::{PathType, RegionOpenRequest};
+use store_api::storage::RegionId;
-use crate::heartbeat::handler::HandlerContext;
+use crate::heartbeat::handler::{HandlerContext, InstructionHandler};
-impl HandlerContext {
- pub(crate) fn handle_open_regions_instruction(
- self,
- open_regions: Vec<OpenRegion>,
- open_region_parallelism: usize,
- ) -> BoxFuture<'static, Option<InstructionReply>> {
- Box::pin(async move {
- let requests = open_regions
- .into_iter()
- .map(|open_region| {
- let OpenRegion {
- region_ident,
- region_storage_path,
- mut region_options,
- region_wal_options,
- skip_wal_replay,
- } = open_region;
- let region_id = Self::region_ident_to_region_id(&region_ident);
- prepare_wal_options(&mut region_options, region_id, &region_wal_options);
- let request = RegionOpenRequest {
- engine: region_ident.engine,
- table_dir: table_dir(®ion_storage_path, region_id.table_id()),
- path_type: PathType::Bare,
- options: region_options,
- skip_wal_replay,
- checkpoint: None,
- };
- (region_id, request)
- })
- .collect::<Vec<_>>();
+pub struct OpenRegionsHandler {
+ pub open_region_parallelism: usize,
+}
- let result = self
- .region_server
- .handle_batch_open_requests(open_region_parallelism, requests, false)
- .await;
- let success = result.is_ok();
- let error = result.as_ref().map_err(|e| format!("{e:?}")).err();
- Some(InstructionReply::OpenRegions(SimpleReply {
- result: success,
- error,
- }))
- })
+#[async_trait::async_trait]
+impl InstructionHandler for OpenRegionsHandler {
+ async fn handle(
+ &self,
+ ctx: &HandlerContext,
+ instruction: Instruction,
+ ) -> Option<InstructionReply> {
+ let open_regions = instruction.into_open_regions().unwrap();
+
+ let requests = open_regions
+ .into_iter()
+ .map(|open_region| {
+ let OpenRegion {
+ region_ident,
+ region_storage_path,
+ mut region_options,
+ region_wal_options,
+ skip_wal_replay,
+ } = open_region;
+ let region_id = RegionId::new(region_ident.table_id, region_ident.region_number);
+ prepare_wal_options(&mut region_options, region_id, &region_wal_options);
+ let request = RegionOpenRequest {
+ engine: region_ident.engine,
+ table_dir: table_dir(®ion_storage_path, region_id.table_id()),
+ path_type: PathType::Bare,
+ options: region_options,
+ skip_wal_replay,
+ checkpoint: None,
+ };
+ (region_id, request)
+ })
+ .collect::<Vec<_>>();
+
+ let result = ctx
+ .region_server
+ .handle_batch_open_requests(self.open_region_parallelism, requests, false)
+ .await;
+ let success = result.is_ok();
+ let error = result.as_ref().map_err(|e| format!("{e:?}")).err();
+
+ Some(InstructionReply::OpenRegions(SimpleReply {
+ result: success,
+ error,
+ }))
}
}
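
OpenRegionsHandler now carries open_region_parallelism as a field instead of threading it through every call site. A standalone sketch of the bounded-parallelism batch open that handle_batch_open_requests presumably performs under the hood (all names are local to the example; assumes the futures crate):

    use futures::stream::{self, StreamExt};

    // Opens every region with at most `parallelism` opens in flight; collecting
    // into Result makes the whole batch fail if any single open fails.
    async fn open_all(region_ids: Vec<u64>, parallelism: usize) -> Result<(), String> {
        stream::iter(region_ids)
            .map(|region_id| async move {
                // Stand-in for opening one region.
                if region_id == 0 {
                    Err(format!("failed to open region {region_id}"))
                } else {
                    Ok(())
                }
            })
            .buffer_unordered(parallelism)
            .collect::<Vec<Result<(), String>>>()
            .await
            .into_iter()
            .collect()
    }

    fn main() {
        assert!(futures::executor::block_on(open_all(vec![1, 2, 3], 8)).is_ok());
        assert!(futures::executor::block_on(open_all(vec![0, 1], 8)).is_err());
    }
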
diff --git a/src/datanode/src/heartbeat/handler/upgrade_region.rs b/src/datanode/src/heartbeat/handler/upgrade_region.rs
index c1f238e059..239eaf1e4c 100644
--- a/src/datanode/src/heartbeat/handler/upgrade_region.rs
+++ b/src/datanode/src/heartbeat/handler/upgrade_region.rs
@@ -12,18 +12,24 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use common_meta::instruction::{InstructionReply, UpgradeRegion, UpgradeRegionReply};
+use common_meta::instruction::{Instruction, InstructionReply, UpgradeRegion, UpgradeRegionReply};
use common_telemetry::{info, warn};
-use futures_util::future::BoxFuture;
use store_api::region_request::{RegionCatchupRequest, RegionRequest, ReplayCheckpoint};
-use crate::heartbeat::handler::HandlerContext;
+use crate::heartbeat::handler::{HandlerContext, InstructionHandler};
use crate::heartbeat::task_tracker::WaitResult;
-impl HandlerContext {
- pub(crate) fn handle_upgrade_region_instruction(
- self,
- UpgradeRegion {
+#[derive(Debug, Clone, Copy, Default)]
+pub struct UpgradeRegionsHandler;
+
+#[async_trait::async_trait]
+impl InstructionHandler for UpgradeRegionsHandler {
+ async fn handle(
+ &self,
+ ctx: &HandlerContext,
+ instruction: Instruction,
+ ) -> Option<InstructionReply> {
+ let UpgradeRegion {
region_id,
last_entry_id,
metadata_last_entry_id,
@@ -31,116 +37,116 @@ impl HandlerContext {
location_id,
replay_entry_id,
metadata_replay_entry_id,
- }: UpgradeRegion,
- ) -> BoxFuture<'static, Option<InstructionReply>> {
- Box::pin(async move {
- let Some(writable) = self.region_server.is_region_leader(region_id) else {
- return Some(InstructionReply::UpgradeRegion(UpgradeRegionReply {
- ready: false,
- exists: false,
- error: None,
- }));
- };
+ } = instruction.into_upgrade_regions().unwrap();
- if writable {
- return Some(InstructionReply::UpgradeRegion(UpgradeRegionReply {
+ let Some(writable) = ctx.region_server.is_region_leader(region_id) else {
+ return Some(InstructionReply::UpgradeRegion(UpgradeRegionReply {
+ ready: false,
+ exists: false,
+ error: None,
+ }));
+ };
+
+ if writable {
+ return Some(InstructionReply::UpgradeRegion(UpgradeRegionReply {
+ ready: true,
+ exists: true,
+ error: None,
+ }));
+ }
+
+ let region_server_moved = ctx.region_server.clone();
+
+ let checkpoint = match (replay_entry_id, metadata_replay_entry_id) {
+ (Some(entry_id), metadata_entry_id) => Some(ReplayCheckpoint {
+ entry_id,
+ metadata_entry_id,
+ }),
+ _ => None,
+ };
+
+ // The catchup task is almost zero cost if the underlying region is already writable,
+ // so a new catchup task is always registered.
+ let register_result = ctx
+ .catchup_tasks
+ .try_register(
+ region_id,
+ Box::pin(async move {
+ info!(
+ "Executing region: {region_id} catchup to: last entry id {last_entry_id:?}"
+ );
+ region_server_moved
+ .handle_request(
+ region_id,
+ RegionRequest::Catchup(RegionCatchupRequest {
+ set_writable: true,
+ entry_id: last_entry_id,
+ metadata_entry_id: metadata_last_entry_id,
+ location_id,
+ checkpoint,
+ }),
+ )
+ .await?;
+
+ Ok(())
+ }),
+ )
+ .await;
+
+ if register_result.is_busy() {
+ warn!("Another catchup task is running for the region: {region_id}");
+ }
+
+ // Returns immediately
+ let Some(replay_timeout) = replay_timeout else {
+ return Some(InstructionReply::UpgradeRegion(UpgradeRegionReply {
+ ready: false,
+ exists: true,
+ error: None,
+ }));
+ };
+
+ // We don't care whether it returns a newly registered task or an already running one.
+ let mut watcher = register_result.into_watcher();
+ let result = ctx.catchup_tasks.wait(&mut watcher, replay_timeout).await;
+
+ match result {
+ WaitResult::Timeout => Some(InstructionReply::UpgradeRegion(UpgradeRegionReply {
+ ready: false,
+ exists: true,
+ error: None,
+ })),
+ WaitResult::Finish(Ok(_)) => {
+ Some(InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready: true,
exists: true,
error: None,
- }));
+ }))
}
-
- let region_server_moved = self.region_server.clone();
-
- let checkpoint = match (replay_entry_id, metadata_replay_entry_id) {
- (Some(entry_id), metadata_entry_id) => Some(ReplayCheckpoint {
- entry_id,
- metadata_entry_id,
- }),
- _ => None,
- };
-
- // The catchup task is almost zero cost if the inside region is writable.
- // Therefore, it always registers a new catchup task.
- let register_result = self
- .catchup_tasks
- .try_register(
- region_id,
- Box::pin(async move {
- info!("Executing region: {region_id} catchup to: last entry id {last_entry_id:?}");
- region_server_moved
- .handle_request(
- region_id,
- RegionRequest::Catchup(RegionCatchupRequest {
- set_writable: true,
- entry_id: last_entry_id,
- metadata_entry_id: metadata_last_entry_id,
- location_id,
- checkpoint,
- }),
- )
- .await?;
-
- Ok(())
- }),
- )
- .await;
-
- if register_result.is_busy() {
- warn!("Another catchup task is running for the region: {region_id}");
- }
-
- // Returns immediately
- let Some(replay_timeout) = replay_timeout else {
- return Some(InstructionReply::UpgradeRegion(UpgradeRegionReply {
+ WaitResult::Finish(Err(err)) => {
+ Some(InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready: false,
exists: true,
- error: None,
- }));
- };
-
- // We don't care that it returns a newly registered or running task.
- let mut watcher = register_result.into_watcher();
- let result = self.catchup_tasks.wait(&mut watcher, replay_timeout).await;
-
- match result {
- WaitResult::Timeout => Some(InstructionReply::UpgradeRegion(UpgradeRegionReply {
- ready: false,
- exists: true,
- error: None,
- })),
- WaitResult::Finish(Ok(_)) => {
- Some(InstructionReply::UpgradeRegion(UpgradeRegionReply {
- ready: true,
- exists: true,
- error: None,
- }))
- }
- WaitResult::Finish(Err(err)) => {
- Some(InstructionReply::UpgradeRegion(UpgradeRegionReply {
- ready: false,
- exists: true,
- error: Some(format!("{err:?}")),
- }))
- }
+ error: Some(format!("{err:?}")),
+ }))
}
- })
+ }
}
}
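
The checkpoint match above encodes a small invariant: a replay checkpoint is built only when a data replay entry id is present, and the metadata entry id is carried along optionally. A self-contained model (ReplayCheckpoint here is a two-field stand-in for the store_api type):

    #[derive(Debug, PartialEq)]
    struct ReplayCheckpoint {
        entry_id: u64,
        metadata_entry_id: Option<u64>,
    }

    fn build_checkpoint(replay: Option<u64>, metadata: Option<u64>) -> Option<ReplayCheckpoint> {
        match (replay, metadata) {
            (Some(entry_id), metadata_entry_id) => Some(ReplayCheckpoint {
                entry_id,
                metadata_entry_id,
            }),
            _ => None,
        }
    }

    fn main() {
        assert_eq!(build_checkpoint(None, Some(7)), None); // metadata alone is not enough
        assert_eq!(
            build_checkpoint(Some(42), None),
            Some(ReplayCheckpoint {
                entry_id: 42,
                metadata_entry_id: None,
            })
        );
    }
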
#[cfg(test)]
mod tests {
- use std::assert_matches::assert_matches;
use std::time::Duration;
- use common_meta::instruction::{InstructionReply, UpgradeRegion};
+ use common_meta::instruction::{Instruction, UpgradeRegion};
use mito2::engine::MITO_ENGINE_NAME;
use store_api::region_engine::RegionRole;
use store_api::storage::RegionId;
use tokio::time::Instant;
use crate::error;
- use crate::heartbeat::handler::HandlerContext;
+ use crate::heartbeat::handler::upgrade_region::UpgradeRegionsHandler;
+ use crate::heartbeat::handler::{HandlerContext, InstructionHandler};
use crate::tests::{MockRegionEngine, mock_region_server};
#[tokio::test]
@@ -155,20 +161,20 @@ mod tests {
let waits = vec![None, Some(Duration::from_millis(100u64))];
for replay_timeout in waits {
- let reply = handler_context
- .clone()
- .handle_upgrade_region_instruction(UpgradeRegion {
- region_id,
- replay_timeout,
- ..Default::default()
- })
+ let reply = UpgradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::UpgradeRegion(UpgradeRegion {
+ region_id,
+ replay_timeout,
+ ..Default::default()
+ }),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::UpgradeRegion(_)));
- if let InstructionReply::UpgradeRegion(reply) = reply.unwrap() {
- assert!(!reply.exists);
- assert!(reply.error.is_none());
- }
+ let reply = reply.unwrap().expect_upgrade_region_reply();
+ assert!(!reply.exists);
+ assert!(reply.error.is_none());
}
}
@@ -192,21 +198,21 @@ mod tests {
let waits = vec![None, Some(Duration::from_millis(100u64))];
for replay_timeout in waits {
- let reply = handler_context
- .clone()
- .handle_upgrade_region_instruction(UpgradeRegion {
- region_id,
- replay_timeout,
- ..Default::default()
- })
+ let reply = UpgradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::UpgradeRegion(UpgradeRegion {
+ region_id,
+ replay_timeout,
+ ..Default::default()
+ }),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::UpgradeRegion(_)));
- if let InstructionReply::UpgradeRegion(reply) = reply.unwrap() {
- assert!(reply.ready);
- assert!(reply.exists);
- assert!(reply.error.is_none());
- }
+ let reply = reply.unwrap().expect_upgrade_region_reply();
+ assert!(reply.ready);
+ assert!(reply.exists);
+ assert!(reply.error.is_none());
}
}
@@ -230,21 +236,21 @@ mod tests {
let waits = vec![None, Some(Duration::from_millis(100u64))];
for replay_timeout in waits {
- let reply = handler_context
- .clone()
- .handle_upgrade_region_instruction(UpgradeRegion {
- region_id,
- replay_timeout,
- ..Default::default()
- })
+ let reply = UpgradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::UpgradeRegion(UpgradeRegion {
+ region_id,
+ replay_timeout,
+ ..Default::default()
+ }),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::UpgradeRegion(_)));
- if let InstructionReply::UpgradeRegion(reply) = reply.unwrap() {
- assert!(!reply.ready);
- assert!(reply.exists);
- assert!(reply.error.is_none());
- }
+ let reply = reply.unwrap().expect_upgrade_region_reply();
+ assert!(!reply.ready);
+ assert!(reply.exists);
+ assert!(reply.error.is_none());
}
}
@@ -271,40 +277,41 @@ mod tests {
let handler_context = HandlerContext::new_for_test(mock_region_server);
for replay_timeout in waits {
- let reply = handler_context
- .clone()
- .handle_upgrade_region_instruction(UpgradeRegion {
- region_id,
- replay_timeout,
- ..Default::default()
- })
+ let reply = UpgradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::UpgradeRegion(UpgradeRegion {
+ region_id,
+ replay_timeout,
+ ..Default::default()
+ }),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::UpgradeRegion(_)));
- if let InstructionReply::UpgradeRegion(reply) = reply.unwrap() {
- assert!(!reply.ready);
- assert!(reply.exists);
- assert!(reply.error.is_none());
- }
- }
-
- let timer = Instant::now();
- let reply = handler_context
- .handle_upgrade_region_instruction(UpgradeRegion {
- region_id,
- replay_timeout: Some(Duration::from_millis(500)),
- ..Default::default()
- })
- .await;
- assert_matches!(reply, Some(InstructionReply::UpgradeRegion(_)));
- // Must less than 300 ms.
- assert!(timer.elapsed().as_millis() < 300);
-
- if let InstructionReply::UpgradeRegion(reply) = reply.unwrap() {
- assert!(reply.ready);
+ let reply = reply.unwrap().expect_upgrade_region_reply();
+ assert!(!reply.ready);
assert!(reply.exists);
assert!(reply.error.is_none());
}
+
+ let timer = Instant::now();
+ let reply = UpgradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::UpgradeRegion(UpgradeRegion {
+ region_id,
+ replay_timeout: Some(Duration::from_millis(500)),
+ ..Default::default()
+ }),
+ )
+ .await;
+ // Must be less than 300 ms.
+ assert!(timer.elapsed().as_millis() < 300);
+
+ let reply = reply.unwrap().expect_upgrade_region_reply();
+ assert!(reply.ready);
+ assert!(reply.exists);
+ assert!(reply.error.is_none());
}
#[tokio::test]
@@ -329,37 +336,37 @@ mod tests {
let handler_context = HandlerContext::new_for_test(mock_region_server);
- let reply = handler_context
- .clone()
- .handle_upgrade_region_instruction(UpgradeRegion {
- region_id,
- ..Default::default()
- })
+ let reply = UpgradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::UpgradeRegion(UpgradeRegion {
+ region_id,
+ ..Default::default()
+ }),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::UpgradeRegion(_)));
// It didn't wait for the handler to return, so it had no idea about the error.
- if let InstructionReply::UpgradeRegion(reply) = reply.unwrap() {
- assert!(!reply.ready);
- assert!(reply.exists);
- assert!(reply.error.is_none());
- }
+ let reply = reply.unwrap().expect_upgrade_region_reply();
+ assert!(!reply.ready);
+ assert!(reply.exists);
+ assert!(reply.error.is_none());
- let reply = handler_context
- .clone()
- .handle_upgrade_region_instruction(UpgradeRegion {
- region_id,
- replay_timeout: Some(Duration::from_millis(200)),
- ..Default::default()
- })
+ let reply = UpgradeRegionsHandler
+ .handle(
+ &handler_context,
+ Instruction::UpgradeRegion(UpgradeRegion {
+ region_id,
+ replay_timeout: Some(Duration::from_millis(200)),
+ ..Default::default()
+ }),
+ )
.await;
- assert_matches!(reply, Some(InstructionReply::UpgradeRegion(_)));
- if let InstructionReply::UpgradeRegion(reply) = reply.unwrap() {
- assert!(!reply.ready);
- assert!(reply.exists);
- assert!(reply.error.is_some());
- assert!(reply.error.unwrap().contains("mock_error"));
- }
+ let reply = reply.unwrap().expect_upgrade_region_reply();
+ assert!(!reply.ready);
+ assert!(reply.exists);
+ assert!(reply.error.is_some());
+ assert!(reply.error.unwrap().contains("mock_error"));
}
}
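
The tests above call the expect_upgrade_region_reply accessor (added to InstructionReply elsewhere in this PR) in place of the old assert_matches! plus if-let pattern. It presumably follows the usual panicking-accessor shape, sketched here with stand-in types (field names taken from the assertions above):

    #[derive(Debug)]
    pub struct UpgradeRegionReply {
        pub ready: bool,
        pub exists: bool,
        pub error: Option<String>,
    }

    #[derive(Debug)]
    pub enum InstructionReply {
        UpgradeRegion(UpgradeRegionReply),
        // Placeholder standing in for the remaining reply variants.
        Other,
    }

    impl InstructionReply {
        /// Returns the inner UpgradeRegionReply, panicking on any other variant.
        pub fn expect_upgrade_region_reply(self) -> UpgradeRegionReply {
            match self {
                InstructionReply::UpgradeRegion(reply) => reply,
                other => panic!("expected an UpgradeRegion reply, got: {other:?}"),
            }
        }
    }
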
diff --git a/src/datatypes/src/json.rs b/src/datatypes/src/json.rs
index 380cc8ce06..902b84a131 100644
--- a/src/datatypes/src/json.rs
+++ b/src/datatypes/src/json.rs
@@ -24,6 +24,7 @@ use std::sync::Arc;
use common_base::bytes::StringBytes;
use ordered_float::OrderedFloat;
+use serde::{Deserialize, Serialize};
use serde_json::{Map, Value as Json};
use snafu::{ResultExt, ensure};
@@ -45,7 +46,7 @@ use crate::value::{ListValue, StructValue, Value};
/// convert them to fully structured StructValue for user-facing APIs: the UI protocol and the UDF interface.
///
/// **Important**: This setting only controls the internal form of JSON encoding.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum JsonStructureSettings {
// TODO(sunng87): provide a limit
Structured(Option),
@@ -111,6 +112,12 @@ impl JsonStructureSettings {
}
}
+impl Default for JsonStructureSettings {
+ fn default() -> Self {
+ Self::Structured(None)
+ }
+}
+
impl<'a> JsonContext<'a> {
/// Create a new context with an updated key path
pub fn with_key(&self, key: &str) -> JsonContext<'a> {
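
The new Serialize/Deserialize derives and the Default impl let the settings round-trip as a JSON string through column metadata, under the JSON_STRUCTURE_SETTINGS_KEY added in column_schema.rs below. A round-trip sketch with a stand-in enum, since the real variants and their payload types are elided in this hunk:

    use std::collections::HashMap;

    use serde::{Deserialize, Serialize};

    // Stand-in for JsonStructureSettings; the payload type is a placeholder.
    #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
    enum Settings {
        Structured(Option<usize>),
    }

    impl Default for Settings {
        fn default() -> Self {
            Self::Structured(None)
        }
    }

    fn main() {
        let mut metadata: HashMap<String, String> = HashMap::new();
        metadata.insert(
            "greptime:json:structure_settings".to_string(),
            serde_json::to_string(&Settings::default()).unwrap(),
        );
        let restored: Settings = metadata
            .get("greptime:json:structure_settings")
            .map(|s| serde_json::from_str(s).unwrap())
            .unwrap_or_default();
        assert_eq!(restored, Settings::default());
    }
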
diff --git a/src/datatypes/src/schema.rs b/src/datatypes/src/schema.rs
index 8a79b3c02e..9995072b7c 100644
--- a/src/datatypes/src/schema.rs
+++ b/src/datatypes/src/schema.rs
@@ -32,8 +32,9 @@ pub use crate::schema::column_schema::{
COLUMN_FULLTEXT_OPT_KEY_FALSE_POSITIVE_RATE, COLUMN_FULLTEXT_OPT_KEY_GRANULARITY,
COLUMN_SKIPPING_INDEX_OPT_KEY_FALSE_POSITIVE_RATE, COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY,
COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, COMMENT_KEY, ColumnExtType, ColumnSchema, FULLTEXT_KEY,
- FulltextAnalyzer, FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, Metadata,
- SKIPPING_INDEX_KEY, SkippingIndexOptions, SkippingIndexType, TIME_INDEX_KEY,
+ FulltextAnalyzer, FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY,
+ JSON_STRUCTURE_SETTINGS_KEY, Metadata, SKIPPING_INDEX_KEY, SkippingIndexOptions,
+ SkippingIndexType, TIME_INDEX_KEY,
};
pub use crate::schema::constraint::ColumnDefaultConstraint;
pub use crate::schema::raw::RawSchema;
@@ -368,8 +369,7 @@ impl TryFrom<DFSchemaRef> for Schema {
type Error = Error;
fn try_from(value: DFSchemaRef) -> Result<Self> {
- let s: ArrowSchema = value.as_ref().into();
- s.try_into()
+ value.inner().clone().try_into()
}
}
diff --git a/src/datatypes/src/schema/column_schema.rs b/src/datatypes/src/schema/column_schema.rs
index f176350b8c..627d898810 100644
--- a/src/datatypes/src/schema/column_schema.rs
+++ b/src/datatypes/src/schema/column_schema.rs
@@ -23,6 +23,7 @@ use sqlparser_derive::{Visit, VisitMut};
use crate::data_type::{ConcreteDataType, DataType};
use crate::error::{self, Error, InvalidFulltextOptionSnafu, ParseExtendedTypeSnafu, Result};
+use crate::json::JsonStructureSettings;
use crate::schema::TYPE_KEY;
use crate::schema::constraint::ColumnDefaultConstraint;
use crate::value::Value;
@@ -41,6 +42,7 @@ pub const FULLTEXT_KEY: &str = "greptime:fulltext";
pub const INVERTED_INDEX_KEY: &str = "greptime:inverted_index";
/// Key used to store skip options in arrow field's metadata.
pub const SKIPPING_INDEX_KEY: &str = "greptime:skipping_index";
+/// Key used to store JSON structure settings in arrow field's metadata.
+pub const JSON_STRUCTURE_SETTINGS_KEY: &str = "greptime:json:structure_settings";
/// Keys used in fulltext options
pub const COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE: &str = "enable";
@@ -391,6 +393,21 @@ impl ColumnSchema {
self.metadata.remove(SKIPPING_INDEX_KEY);
Ok(())
}
+
+ pub fn json_structure_settings(&self) -> Result