Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00

Compare commits: v0.17.0-ni ... v0.16.0 (38 commits)

Commits:
83f566ad20, ed2dff6d27, af483335b2, 70852a01a3, 8f3c6f72f5, d3f15e72bf,
b25a6527ed, 7b48e53261, ebe78f668e, 8456949749, f21bedd141, 469c3140fe,
569d93c599, b91b520f54, 7a12585af9, 89b661c98a, e0b1ebdfb6, eaaf9448c7,
dc37382946, b31d307eb6, d9f177ba9f, b940906d86, 9ccc8da231, 0d603bfc96,
62eedbb6cd, dec6da0e8a, a84cf5ec67, 3b7652039f, 9164e8f50d, 3854e2edb4,
f3dd7cccd3, b2112c3f5c, e98f2facd4, 98ef92bb0a, 1352a5b637, 42ed5042b4,
20af73ec44, f3221a3b18
Cargo.lock (generated, 421 changed lines)
@@ -17,6 +17,12 @@ version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"

[[package]]
name = "adler32"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234"

[[package]]
name = "aead"
version = "0.5.2"
@@ -1425,6 +1431,31 @@ dependencies = [
"piper",
]

[[package]]
name = "bon"
version = "3.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33d9ef19ae5263a138da9a86871eca537478ab0332a7770bac7e3f08b801f89f"
dependencies = [
"bon-macros",
"rustversion",
]

[[package]]
name = "bon-macros"
version = "3.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "577ae008f2ca11ca7641bd44601002ee5ab49ef0af64846ce1ab6057218a5cc1"
dependencies = [
"darling 0.21.0",
"ident_case",
"prettyplease",
"proc-macro2",
"quote",
"rustversion",
"syn 2.0.100",
]

[[package]]
name = "borsh"
version = "1.5.1"
@@ -1659,6 +1690,8 @@ dependencies = [
"partition",
"paste",
"prometheus",
"promql-parser",
"rand 0.9.0",
"rustc-hash 2.0.0",
"serde_json",
"session",
@@ -1828,7 +1861,7 @@ checksum = "9c6ac4f2c0bf0f44e9161aec9675e1050aa4a530663c4a9e37e108fa948bca9f"
dependencies = [
"chrono",
"chrono-tz-build",
"phf",
"phf 0.11.2",
]

[[package]]
@@ -1838,7 +1871,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e94fea34d77a245229e7746bd2beb786cd2a896f306ff491fb8cecb3074b10a7"
dependencies = [
"parse-zoneinfo",
"phf_codegen",
"phf_codegen 0.11.2",
]

[[package]]
@@ -2100,6 +2133,7 @@ dependencies = [
"common-error",
"common-grpc",
"common-macro",
"common-mem-prof",
"common-meta",
"common-options",
"common-procedure",
@@ -2309,10 +2343,8 @@ dependencies = [
"api",
"async-trait",
"backon",
"client",
"common-error",
"common-macro",
"common-meta",
"common-telemetry",
"common-time",
"serde",
@@ -2334,6 +2366,7 @@ dependencies = [
"common-meta",
"greptime-proto",
"meta-client",
"session",
"snafu 0.8.5",
"tokio",
"tonic 0.12.3",
@@ -2520,6 +2553,7 @@ dependencies = [
"common-procedure-test",
"common-query",
"common-recordbatch",
"common-runtime",
"common-telemetry",
"common-test-util",
"common-time",
@@ -2547,6 +2581,9 @@ dependencies = [
"rand 0.9.0",
"regex",
"rskafka",
"rustls",
"rustls-native-certs 0.7.3",
"rustls-pemfile",
"serde",
"serde_json",
"serde_with",
@@ -2558,6 +2595,7 @@ dependencies = [
"table",
"tokio",
"tokio-postgres",
"tokio-postgres-rustls",
"tonic 0.12.3",
"tracing",
"typetag",
@@ -2593,11 +2631,13 @@ dependencies = [
name = "common-procedure"
version = "0.16.0"
dependencies = [
"api",
"async-stream",
"async-trait",
"backon",
"common-base",
"common-error",
"common-event-recorder",
"common-macro",
"common-runtime",
"common-telemetry",
@@ -2645,7 +2685,7 @@ dependencies = [
"futures-util",
"serde",
"snafu 0.8.5",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0-greptime",
"sqlparser_derive 0.1.1",
"statrs",
"store-api",
@@ -2726,7 +2766,7 @@ dependencies = [
"jsonb",
"serde_json",
"snafu 0.8.5",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0-greptime",
]

[[package]]
@@ -3260,6 +3300,16 @@ dependencies = [
"darling_macro 0.20.10",
]

[[package]]
name = "darling"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a79c4acb1fd5fa3d9304be4c76e031c54d2e92d172a393e24b19a14fe8532fe9"
dependencies = [
"darling_core 0.21.0",
"darling_macro 0.21.0",
]

[[package]]
name = "darling_core"
version = "0.14.4"
@@ -3288,6 +3338,20 @@ dependencies = [
"syn 2.0.100",
]

[[package]]
name = "darling_core"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74875de90daf30eb59609910b84d4d368103aaec4c924824c6799b28f77d6a1d"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim 0.11.1",
"syn 2.0.100",
]

[[package]]
name = "darling_macro"
version = "0.14.4"
@@ -3310,6 +3374,23 @@ dependencies = [
"syn 2.0.100",
]

[[package]]
name = "darling_macro"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e79f8e61677d5df9167cd85265f8e5f64b215cdea3fb55eebc3e622e44c7a146"
dependencies = [
"darling_core 0.21.0",
"quote",
"syn 2.0.100",
]

[[package]]
name = "dary_heap"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728"

[[package]]
name = "dashmap"
version = "6.1.0"
@@ -3365,7 +3446,7 @@ dependencies = [
"parquet",
"rand 0.8.5",
"regex",
"sqlparser 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)",
"sqlparser 0.54.0",
"tempfile",
"tokio",
"tokio-util",
@@ -3392,7 +3473,7 @@ dependencies = [
"itertools 0.14.0",
"log",
"parking_lot 0.12.3",
"sqlparser 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)",
"sqlparser 0.54.0",
]

[[package]]
@@ -3438,7 +3519,7 @@ dependencies = [
"parquet",
"paste",
"recursive",
"sqlparser 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)",
"sqlparser 0.54.0",
"tokio",
"web-time 1.1.0",
]
@@ -3492,7 +3573,7 @@ dependencies = [
"paste",
"recursive",
"serde_json",
"sqlparser 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)",
"sqlparser 0.54.0",
]

[[package]]
@@ -3760,7 +3841,7 @@ dependencies = [
"log",
"recursive",
"regex",
"sqlparser 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)",
"sqlparser 0.54.0",
]

[[package]]
@@ -3799,6 +3880,7 @@ dependencies = [
"common-grpc",
"common-macro",
"common-meta",
"common-options",
"common-procedure",
"common-query",
"common-recordbatch",
@@ -3868,7 +3950,7 @@ dependencies = [
"serde",
"serde_json",
"snafu 0.8.5",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0-greptime",
"sqlparser_derive 0.1.1",
]

@@ -4235,9 +4317,9 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1"

[[package]]
name = "downcast-rs"
version = "1.2.1"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2"
checksum = "ea8a8b81cacc08888170eef4d13b775126db426d0b348bee9d18c2c1eaf123cf"

[[package]]
name = "duration-str"
@@ -4680,6 +4762,7 @@ dependencies = [
"common-grpc",
"common-macro",
"common-meta",
"common-options",
"common-query",
"common-recordbatch",
"common-runtime",
@@ -4836,7 +4919,7 @@ dependencies = [
"session",
"snafu 0.8.5",
"sql",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0-greptime",
"store-api",
"strfmt",
"substrait 0.16.0",
@@ -5230,7 +5313,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=fe8c13f5f3c1fbef63f57fbdd29f0490dfeb987b#fe8c13f5f3c1fbef63f57fbdd29f0490dfeb987b"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=ccfd4da48bc0254ed865e479cd981a3581b02d84#ccfd4da48bc0254ed865e479cd981a3581b02d84"
dependencies = [
"prost 0.13.5",
"serde",
@@ -5980,6 +6063,29 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb56e1aa765b4b4f3aadfab769793b7087bb03a4ea4920644a6d238e2df5b9ed"

[[package]]
name = "include-flate"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df49c16750695486c1f34de05da5b7438096156466e7f76c38fcdf285cf0113e"
dependencies = [
"include-flate-codegen",
"lazy_static",
"libflate",
]

[[package]]
name = "include-flate-codegen"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c5b246c6261be723b85c61ecf87804e8ea4a35cb68be0ff282ed84b95ffe7d7"
dependencies = [
"libflate",
"proc-macro2",
"quote",
"syn 2.0.100",
]

[[package]]
name = "include_dir"
version = "0.7.4"
@@ -6013,6 +6119,7 @@ dependencies = [
"common-runtime",
"common-telemetry",
"common-test-util",
"criterion 0.4.0",
"fastbloom",
"fst",
"futures",
@@ -6131,6 +6238,17 @@ dependencies = [
"snafu 0.7.5",
]

[[package]]
name = "inherent"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c38228f24186d9cc68c729accb4d413be9eaed6ad07ff79e0270d9e56f3de13"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.100",
]

[[package]]
name = "inotify"
version = "0.11.0"
@@ -6168,9 +6286,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
dependencies = [
"cfg-if",
"js-sys",
"wasm-bindgen",
"web-sys",
]

[[package]]
@@ -6300,17 +6415,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"

[[package]]
name = "jieba-rs"
version = "0.7.0"
name = "jieba-macros"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1e2b0210dc78b49337af9e49d7ae41a39dceac6e5985613f1cf7763e2f76a25"
checksum = "6105f38f083bb1a79ad523bd32fa0d8ffcb6abd2fc4da9da203c32bca5b6ace3"
dependencies = [
"phf_codegen 0.12.1",
]

[[package]]
name = "jieba-rs"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47982a320106da83b0c5d6aec0fb83e109f0132b69670b063adaa6fa5b4f3f4a"
dependencies = [
"cedarwood",
"derive_builder 0.20.1",
"fxhash",
"lazy_static",
"phf",
"phf_codegen",
"include-flate",
"jieba-macros",
"phf 0.12.1",
"regex",
]

@@ -6751,6 +6874,30 @@ version = "0.2.171"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6"

[[package]]
name = "libflate"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45d9dfdc14ea4ef0900c1cddbc8dcd553fbaacd8a4a282cf4018ae9dd04fb21e"
dependencies = [
"adler32",
"core2",
"crc32fast",
"dary_heap",
"libflate_lz77",
]

[[package]]
name = "libflate_lz77"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6e0d73b369f386f1c44abd9c570d5318f55ccde816ff4b562fa452e5182863d"
dependencies = [
"core2",
"hashbrown 0.14.5",
"rle-decode-fast",
]

[[package]]
name = "libfuzzer-sys"
version = "0.4.7"
@@ -6781,7 +6928,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4"
dependencies = [
"cfg-if",
"windows-targets 0.48.5",
"windows-targets 0.52.6",
]

[[package]]
@@ -7142,11 +7289,10 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"

[[package]]
name = "measure_time"
version = "0.8.3"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbefd235b0aadd181626f281e1d684e116972988c14c264e42069d5e8a5775cc"
checksum = "51c55d61e72fc3ab704396c5fa16f4c184db37978ae4e94ca8959693a235fc0e"
dependencies = [
"instant",
"log",
]

@@ -7239,6 +7385,7 @@ dependencies = [
"common-catalog",
"common-config",
"common-error",
"common-event-recorder",
"common-greptimedb-telemetry",
"common-grpc",
"common-macro",
@@ -8579,7 +8726,7 @@ dependencies = [
"session",
"snafu 0.8.5",
"sql",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0-greptime",
"store-api",
"substrait 0.16.0",
"table",
@@ -8703,9 +8850,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"

[[package]]
name = "ownedbytes"
version = "0.7.0"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3a059efb063b8f425b948e042e6b9bd85edfe60e913630ed727b23e2dfcc558"
checksum = "2fbd56f7631767e61784dc43f8580f403f4475bd4aaa4da003e6295e1bab4a7e"
dependencies = [
"stable_deref_trait",
]
@@ -8860,7 +9007,7 @@ dependencies = [
"session",
"snafu 0.8.5",
"sql",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0-greptime",
"store-api",
"table",
]
@@ -9062,7 +9209,17 @@ version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc"
dependencies = [
"phf_shared",
"phf_shared 0.11.2",
]

[[package]]
name = "phf"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "913273894cec178f401a31ec4b656318d95473527be05c0752cc41cdc32be8b7"
dependencies = [
"phf_shared 0.12.1",
"serde",
]

[[package]]
@@ -9071,8 +9228,18 @@ version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8d39688d359e6b34654d328e262234662d16cc0f60ec8dcbe5e718709342a5a"
dependencies = [
"phf_generator",
"phf_shared",
"phf_generator 0.11.2",
"phf_shared 0.11.2",
]

[[package]]
name = "phf_codegen"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "efbdcb6f01d193b17f0b9c3360fa7e0e620991b193ff08702f78b3ce365d7e61"
dependencies = [
"phf_generator 0.12.1",
"phf_shared 0.12.1",
]

[[package]]
@@ -9081,10 +9248,20 @@ version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0"
dependencies = [
"phf_shared",
"phf_shared 0.11.2",
"rand 0.8.5",
]

[[package]]
name = "phf_generator"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2cbb1126afed61dd6368748dae63b1ee7dc480191c6262a3b4ff1e29d86a6c5b"
dependencies = [
"fastrand",
"phf_shared 0.12.1",
]

[[package]]
name = "phf_shared"
version = "0.11.2"
@@ -9094,6 +9271,15 @@ dependencies = [
"siphasher 0.3.11",
]

[[package]]
name = "phf_shared"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06005508882fb681fd97892ecff4b7fd0fee13ef1aa569f8695dae7ab9099981"
dependencies = [
"siphasher 1.0.1",
]

[[package]]
name = "pin-project"
version = "1.1.5"
@@ -9682,7 +9868,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf"
dependencies = [
"heck 0.5.0",
"itertools 0.11.0",
"itertools 0.14.0",
"log",
"multimap",
"once_cell",
@@ -9728,7 +9914,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d"
dependencies = [
"anyhow",
"itertools 0.11.0",
"itertools 0.14.0",
"proc-macro2",
"quote",
"syn 2.0.100",
@@ -9973,7 +10159,7 @@ dependencies = [
"session",
"snafu 0.8.5",
"sql",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0-greptime",
"statrs",
"store-api",
"substrait 0.16.0",
@@ -10306,9 +10492,9 @@ dependencies = [

[[package]]
name = "regex"
version = "1.11.0"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
@@ -10581,6 +10767,12 @@ dependencies = [
"syn 1.0.109",
]

[[package]]
name = "rle-decode-fast"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422"

[[package]]
name = "roaring"
version = "0.10.9"
@@ -11081,6 +11273,30 @@ dependencies = [
"sha2",
]

[[package]]
name = "sea-query"
version = "0.32.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "64c91783d1514b99754fc6a4079081dcc2c587dadbff65c48c7f62297443536a"
dependencies = [
"inherent",
"sea-query-derive",
]

[[package]]
name = "sea-query-derive"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bae0cbad6ab996955664982739354128c58d16e126114fe88c2a493642502aab"
dependencies = [
"darling 0.20.10",
"heck 0.4.1",
"proc-macro2",
"quote",
"syn 2.0.100",
"thiserror 2.0.12",
]

[[package]]
name = "seahash"
version = "4.1.0"
@@ -11149,9 +11365,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4"

[[package]]
name = "serde"
version = "1.0.217"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
"serde_derive",
]
@@ -11168,9 +11384,9 @@ dependencies = [

[[package]]
name = "serde_derive"
version = "1.0.217"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
@@ -11625,9 +11841,9 @@ checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d"

[[package]]
name = "sketches-ddsketch"
version = "0.2.2"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85636c14b73d81f541e525f585c0a2109e6744e1565b5c1668e31c70c10ed65c"
checksum = "c1e9a774a6c28142ac54bb25d25562e6bcf957493a184f15ad4eebccb23e410a"
dependencies = [
"serde",
]
@@ -11788,11 +12004,12 @@ dependencies = [
"itertools 0.14.0",
"jsonb",
"lazy_static",
"pretty_assertions",
"regex",
"serde",
"serde_json",
"snafu 0.8.5",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0-greptime",
"sqlparser_derive 0.1.1",
"store-api",
"table",
@@ -11849,27 +12066,27 @@ dependencies = [

[[package]]
name = "sqlparser"
version = "0.54.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c66e3b7374ad4a6af849b08b3e7a6eda0edbd82f0fd59b57e22671bf16979899"
dependencies = [
"log",
"recursive",
"sqlparser_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "sqlparser"
version = "0.54.0"
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e#0cf6c04490d59435ee965edd2078e8855bd8471e"
version = "0.54.0-greptime"
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=df6fcca80ce903f5beef7002cd2c1b062e7024f8#df6fcca80ce903f5beef7002cd2c1b062e7024f8"
dependencies = [
"lazy_static",
"log",
"recursive",
"regex",
"serde",
"sqlparser 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)",
"sqlparser_derive 0.3.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0",
"sqlparser_derive 0.3.0-greptime",
]

[[package]]
name = "sqlparser"
version = "0.54.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c66e3b7374ad4a6af849b08b3e7a6eda0edbd82f0fd59b57e22671bf16979899"
dependencies = [
"log",
"recursive",
"sqlparser_derive 0.3.0",
]

[[package]]
@@ -11885,9 +12102,8 @@ dependencies = [

[[package]]
name = "sqlparser_derive"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da5fc6819faabb412da764b99d3b713bb55083c11e7e0c00144d386cd6a1939c"
version = "0.3.0-greptime"
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=df6fcca80ce903f5beef7002cd2c1b062e7024f8#df6fcca80ce903f5beef7002cd2c1b062e7024f8"
dependencies = [
"proc-macro2",
"quote",
@@ -11897,7 +12113,8 @@ dependencies = [

[[package]]
name = "sqlparser_derive"
version = "0.3.0"
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e#0cf6c04490d59435ee965edd2078e8855bd8471e"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da5fc6819faabb412da764b99d3b713bb55083c11e7e0c00144d386cd6a1939c"
dependencies = [
"proc-macro2",
"quote",
@@ -12172,7 +12389,7 @@ dependencies = [
"serde",
"serde_json",
"snafu 0.8.5",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0-greptime",
"strum 0.27.1",
"tokio",
]
@@ -12197,7 +12414,7 @@ checksum = "bf776ba3fa74f83bf4b63c3dcbbf82173db2632ed8452cb2d891d33f459de70f"
dependencies = [
"new_debug_unreachable",
"parking_lot 0.12.3",
"phf_shared",
"phf_shared 0.11.2",
"precomputed-hash",
]

@@ -12542,7 +12759,7 @@ dependencies = [
"serde",
"serde_json",
"snafu 0.8.5",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0-greptime",
"store-api",
"tokio",
"tokio-util",
@@ -12556,14 +12773,15 @@ checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417"

[[package]]
name = "tantivy"
version = "0.22.0"
version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8d0582f186c0a6d55655d24543f15e43607299425c5ad8352c242b914b31856"
checksum = "64a966cb0e76e311f09cf18507c9af192f15d34886ee43d7ba7c7e3803660c43"
dependencies = [
"aho-corasick",
"arc-swap",
"base64 0.22.1",
"bitpacking",
"bon",
"byteorder",
"census",
"crc32fast",
@@ -12573,20 +12791,20 @@ dependencies = [
"fnv",
"fs4",
"htmlescape",
"itertools 0.12.1",
"hyperloglogplus",
"itertools 0.14.0",
"levenshtein_automata",
"log",
"lru",
"lz4_flex",
"measure_time",
"memmap2",
"num_cpus",
"once_cell",
"oneshot",
"rayon",
"regex",
"rust-stemmers",
"rustc-hash 1.1.0",
"rustc-hash 2.0.0",
"serde",
"serde_json",
"sketches-ddsketch",
@@ -12599,7 +12817,7 @@ dependencies = [
"tantivy-stacker",
"tantivy-tokenizer-api",
"tempfile",
"thiserror 1.0.64",
"thiserror 2.0.12",
"time",
"uuid",
"winapi",
@@ -12608,22 +12826,22 @@ dependencies = [

[[package]]
name = "tantivy-bitpacker"
version = "0.6.0"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "284899c2325d6832203ac6ff5891b297fc5239c3dc754c5bc1977855b23c10df"
checksum = "1adc286a39e089ae9938935cd488d7d34f14502544a36607effd2239ff0e2494"
dependencies = [
"bitpacking",
]

[[package]]
name = "tantivy-columnar"
version = "0.3.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12722224ffbe346c7fec3275c699e508fd0d4710e629e933d5736ec524a1f44e"
checksum = "6300428e0c104c4f7db6f95b466a6f5c1b9aece094ec57cdd365337908dc7344"
dependencies = [
"downcast-rs",
"fastdivide",
"itertools 0.12.1",
"itertools 0.14.0",
"serde",
"tantivy-bitpacker",
"tantivy-common",
@@ -12633,9 +12851,9 @@ dependencies = [

[[package]]
name = "tantivy-common"
version = "0.7.0"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8019e3cabcfd20a1380b491e13ff42f57bb38bf97c3d5fa5c07e50816e0621f4"
checksum = "e91b6ea6090ce03dc72c27d0619e77185d26cc3b20775966c346c6d4f7e99d7f"
dependencies = [
"async-trait",
"byteorder",
@@ -12657,9 +12875,9 @@ dependencies = [

[[package]]
name = "tantivy-jieba"
version = "0.11.0"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f2fe65c125f0d76d06f0f2ce9fbb9287b53f0dafb51a6270d984a840e2f16c1"
checksum = "3b08147cc130e323ecc522117927b198bec617fe1df562a0b6449905858d0363"
dependencies = [
"jieba-rs",
"lazy_static",
@@ -12668,19 +12886,23 @@ dependencies = [

[[package]]
name = "tantivy-query-grammar"
version = "0.22.0"
version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "847434d4af57b32e309f4ab1b4f1707a6c566656264caa427ff4285c4d9d0b82"
checksum = "e810cdeeebca57fc3f7bfec5f85fdbea9031b2ac9b990eb5ff49b371d52bbe6a"
dependencies = [
"nom",
"serde",
"serde_json",
]

[[package]]
name = "tantivy-sstable"
version = "0.3.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c69578242e8e9fc989119f522ba5b49a38ac20f576fc778035b96cc94f41f98e"
checksum = "709f22c08a4c90e1b36711c1c6cad5ae21b20b093e535b69b18783dd2cb99416"
dependencies = [
"futures-util",
"itertools 0.14.0",
"tantivy-bitpacker",
"tantivy-common",
"tantivy-fst",
@@ -12689,9 +12911,9 @@ dependencies = [

[[package]]
name = "tantivy-stacker"
version = "0.3.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c56d6ff5591fc332739b3ce7035b57995a3ce29a93ffd6012660e0949c956ea8"
checksum = "2bcdebb267671311d1e8891fd9d1301803fdb8ad21ba22e0a30d0cab49ba59c1"
dependencies = [
"murmurhash32",
"rand_distr",
@@ -12700,9 +12922,9 @@ dependencies = [

[[package]]
name = "tantivy-tokenizer-api"
version = "0.3.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a0dcade25819a89cfe6f17d932c9cedff11989936bf6dd4f336d50392053b04"
checksum = "dfa942fcee81e213e09715bbce8734ae2180070b97b33839a795ba1de201547d"
dependencies = [
"serde",
]
@@ -12807,7 +13029,7 @@ dependencies = [
"serde_yaml",
"snafu 0.8.5",
"sql",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"sqlparser 0.54.0-greptime",
"sqlx",
"store-api",
"strum 0.27.1",
@@ -12835,6 +13057,7 @@ dependencies = [
"common-catalog",
"common-config",
"common-error",
"common-event-recorder",
"common-grpc",
"common-meta",
"common-procedure",
@@ -12877,6 +13100,7 @@ dependencies = [
"rand 0.9.0",
"rstest",
"rstest_reuse",
"sea-query",
"serde_json",
"servers",
"session",
@@ -12895,6 +13119,7 @@ dependencies = [
"tonic 0.12.3",
"tower 0.5.2",
"url",
"urlencoding",
"uuid",
"yaml-rust",
"zstd 0.13.2",
@@ -13165,7 +13390,7 @@ dependencies = [
"log",
"parking_lot 0.12.3",
"percent-encoding",
"phf",
"phf 0.11.2",
"pin-project-lite",
"postgres-protocol",
"postgres-types",
@@ -14373,7 +14598,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
"windows-sys 0.48.0",
"windows-sys 0.59.0",
]

[[package]]

@@ -140,7 +140,7 @@ etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "fe8c13f5f3c1fbef63f57fbdd29f0490dfeb987b" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ccfd4da48bc0254ed865e479cd981a3581b02d84" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -173,6 +173,7 @@ parking_lot = "0.12"
parquet = { version = "54.2", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0"
pin-project = "1.0"
pretty_assertions = "1.4.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.6", features = ["ser"] }
prost = { version = "0.13", features = ["no-recursion-limit"] }
@@ -196,6 +197,7 @@ rust_decimal = "1.33"
rustc-hash = "2.0"
# It is worth noting that we should try to avoid using aws-lc-rs until it can be compiled on various platforms.
rustls = { version = "0.23.25", default-features = false }
sea-query = "0.32"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
@@ -204,7 +206,7 @@ simd-json = "0.15"
similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0cf6c04490d59435ee965edd2078e8855bd8471e", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "df6fcca80ce903f5beef7002cd2c1b062e7024f8", features = [
"visitor",
"serde",
] } # branch = "v0.54.x"

@@ -147,6 +147,7 @@
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `128` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -207,6 +208,8 @@
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |


## Distributed Mode
@@ -311,6 +314,8 @@
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |


### Metasrv
@@ -333,6 +338,12 @@
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `backend_tls` | -- | -- | TLS configuration for kv store backend (only applicable for PostgreSQL/MySQL backends)<br/>When using PostgreSQL or MySQL as metadata store, you can configure TLS here |
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)<br/>Like "/path/to/client.crt" |
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)<br/>Like "/path/to/client.key" |
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)<br/>Required when using custom CAs or self-signed certificates<br/>Leave empty to use system root certificates only<br/>Like "/path/to/ca.crt" |
| `backend_tls.watch` | Bool | `false` | Watch for certificate file changes and auto reload |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
@@ -370,6 +381,8 @@
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
| `event_recorder.ttl` | String | `30d` | TTL for the events table that will be used to store the events. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -389,6 +402,8 @@
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |


### Datanode
@@ -501,6 +516,7 @@
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `128` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -554,6 +570,8 @@
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |


### Flownode
@@ -611,3 +629,5 @@
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `query` | -- | -- | -- |
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for query sent by flownode.<br/>Default to 1, so it won't use too much cpu or memory |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |

@@ -474,6 +474,9 @@ sst_write_buffer_size = "8MB"
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32

## Maximum number of SST files to scan concurrently.
max_concurrent_scan_files = 128

## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false

@@ -669,3 +672,11 @@ headers = { }
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true

@@ -136,3 +136,11 @@ default_ratio = 1.0
## Parallelism of the query engine for query sent by flownode.
## Default to 1, so it won't use too much cpu or memory
parallelism = 1

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true

@@ -280,3 +280,11 @@ headers = { }
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true

@@ -65,6 +65,34 @@ node_max_idle_time = "24hours"
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4

## TLS configuration for kv store backend (only applicable for PostgreSQL/MySQL backends)
## When using PostgreSQL or MySQL as metadata store, you can configure TLS here
[backend_tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - "disable" - No TLS
## - "prefer" (default) - Try TLS, fallback to plain
## - "require" - Require TLS
## - "verify_ca" - Require TLS and verify CA
## - "verify_full" - Require TLS and verify hostname
mode = "prefer"

## Path to client certificate file (for client authentication)
## Like "/path/to/client.crt"
cert_path = ""

## Path to client private key file (for client authentication)
## Like "/path/to/client.key"
key_path = ""

## Path to CA certificate file (for server certificate verification)
## Required when using custom CAs or self-signed certificates
## Leave empty to use system root certificates only
## Like "/path/to/ca.crt"
ca_cert_path = ""

## Watch for certificate file changes and auto reload
watch = false

## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
@@ -212,6 +240,11 @@ create_topic_timeout = "30s"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"

## Configuration options for the event recorder.
[event_recorder]
## TTL for the events table that will be used to store the events.
ttl = "30d"

## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
@@ -265,3 +298,11 @@ headers = { }
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true

@@ -565,6 +565,9 @@ sst_write_buffer_size = "8MB"
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32

## Maximum number of SST files to scan concurrently.
max_concurrent_scan_files = 128

## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false

@@ -783,3 +786,11 @@ headers = { }
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true

@@ -47,4 +47,6 @@ WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["greptime"]

@@ -47,4 +47,6 @@ WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["greptime"]

@@ -15,4 +15,6 @@ ADD $TARGETARCH/greptime /greptime/bin/

ENV PATH /greptime/bin/:$PATH

ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["greptime"]

@@ -18,4 +18,6 @@ ENV PATH /greptime/bin/:$PATH

ENV TARGET_BIN=$TARGET_BIN

ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["sh", "-c", "exec $TARGET_BIN \"$@\"", "--"]

@@ -30,6 +30,23 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph

## Profiling

### Configuration

You can control heap profiling activation through configuration. Add the following to your configuration file:

```toml
[memory]
# Whether to enable heap profiling activation during startup.
# When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
# is set to "prof:true,prof_active:false". The official image adds this env variable.
# Default is true.
enable_heap_profiling = true
```

By default, if you set `MALLOC_CONF=prof:true,prof_active:false`, the database will enable profiling during startup. You can disable this behavior by setting `enable_heap_profiling = false` in the configuration.

### Starting with environment variables

Start a GreptimeDB instance with environment variables:

```bash
@@ -40,6 +57,23 @@ MALLOC_CONF=prof:true ./target/debug/greptime standalone start
_RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
```

### Memory profiling control

You can control heap profiling activation using the new HTTP APIs:

```bash
# Check current profiling status
curl -X GET localhost:4000/debug/prof/mem/status

# Activate heap profiling (if not already active)
curl -X POST localhost:4000/debug/prof/mem/activate

# Deactivate heap profiling
curl -X POST localhost:4000/debug/prof/mem/deactivate
```

### Dump memory profiling data

Dump memory profiling data through HTTP API:

```bash

docs/rfcs/2025-07-04-compatibility-test-framework.md (new file, 151 lines)
@@ -0,0 +1,151 @@
---
Feature Name: Compatibility Test Framework
Tracking Issue: TBD
Date: 2025-07-04
Author: "Ruihang Xia <waynestxia@gmail.com>"
---

# Summary

This RFC proposes a compatibility test framework for GreptimeDB to ensure backward and forward compatibility across different versions of GreptimeDB.

# Motivation

In current practice, we don't have a systematic way to test and ensure compatibility between different versions of GreptimeDB. Each time we release a new version, we have to test compatibility manually with ad-hoc cases. This is not only time-consuming but also error-prone and unmaintainable, and it relies heavily on the release manager to ensure compatibility between versions.

We also lack a detailed guide in the release SOP on how to test and ensure the compatibility of a new version, and we have broken compatibility many times (`v0.14.1` and `v0.15.1` are two examples, both released right after a major release).

# Details

This RFC proposes a compatibility test framework that is easy to maintain, extend, and run. It can determine the compatibility between any two given versions of GreptimeDB, both backward and forward. It is based on the Sqlness library, but uses it in a different way.

Generally speaking, the framework is composed of two parts:

1. Test cases: A set of test cases maintained dedicatedly for the compatibility test, still in the `.sql` and `.result` format.
2. Test framework: A new sqlness runner used to run the test cases, with some new features that are not required by the integration sqlness test.

## Test Cases

### Structure

The case set is organized in three parts:

- `1.feature`: Use a new feature
- `2.verify`: Verify database behavior
- `3.cleanup`: Paired with `1.feature`, cleans up the test environment.

These three parts are organized in a tree structure, and should be run in sequence:

```
compatibility_test/
├── 1.feature/
│   ├── feature-a/
│   ├── feature-b/
│   └── feature-c/
├── 2.verify/
│   ├── verify-metadata/
│   ├── verify-data/
│   └── verify-schema/
└── 3.cleanup/
    ├── cleanup-a/
    ├── cleanup-b/
    └── cleanup-c/
```

### Example
|
||||
|
||||
For example, for a new feature like adding new index option ([#6416](https://github.com/GreptimeTeam/greptimedb/pull/6416)), we (who implement the feature) create a new test case like this:
|
||||
|
||||
```sql
|
||||
-- path: compatibility_test/1.feature/index-option/granularity_and_false_positive_rate.sql
|
||||
|
||||
-- SQLNESS ARG since=0.15.0
|
||||
-- SQLNESS IGNORE_RESULT
|
||||
CREATE TABLE granularity_and_false_positive_rate (ts timestamp time index, val double) with ("index.granularity" = "8192", "index.false_positive_rate" = "0.01");
|
||||
```
|
||||
|
||||
And
|
||||
|
||||
```sql
|
||||
-- path: compatibility_test/3.cleanup/index-option/granularity_and_false_positive_rate.sql
|
||||
drop table granularity_and_false_positive_rate;
|
||||
```
|
||||
|
||||
Since this new feature doesn't require a special way to verify the database behavior, we can reuse existing test cases in `2.verify/`. For example, we can reuse the `verify-metadata` test case to verify the metadata of the table:

```sql
-- path: compatibility_test/2.verify/verify-metadata/show-create-table.sql

-- SQLNESS TEMPLATE TABLE="SHOW TABLES";
SHOW CREATE TABLE $TABLE;
```

In this example, we use some new sqlness features (`since`, `IGNORE_RESULT`, `TEMPLATE`) that will be introduced in the next section.

### Maintenance

Each time we implement a new feature that should be covered by the compatibility test, we should create new test cases in `1.feature/` and `3.cleanup/` for it, and check whether existing cases in `2.verify/` can be reused to verify the database behavior.

This simulates an enthusiastic user who adopts every new feature as soon as it lands. The only new maintenance burden is on the feature implementer, who writes one more test case to pin down the behavior of the new feature. Once there is a breaking change in the future, the compatibility test framework can detect it automatically.

Another topic is deprecation. If a feature is deprecated, we should also mark that in the test case. Using the example above, assume we deprecate the `index.granularity` and `index.false_positive_rate` index options in `v0.99.0`; we can mark them as:

```sql
-- SQLNESS ARG since=0.15.0 till=0.99.0
...
```

This tells the framework to ignore this feature in version `v0.99.0` and later. Since we currently have many experimental features that are scheduled to change in the future, this is a good way to mark them.

## Test Framework

This section describes the new sqlness features required by this framework.

### Since and Till

Following the `ARG` interceptor in sqlness, we can mark a feature as available between two given versions. Only `since` is required:

```sql
-- SQLNESS ARG since=VERSION_STRING [till=VERSION_STRING]
```
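
To make the `since`/`till` semantics concrete, here is a minimal sketch of how the runner could gate a case by version. This is illustrative only: the `VersionRange` type and `applies_to` function are hypothetical, not part of sqlness today; `till` is treated as an exclusive upper bound, matching the deprecation example above.

```rust
/// Hypothetical version gate for a test case, parsed from
/// `-- SQLNESS ARG since=... [till=...]`.
struct VersionRange {
    since: (u64, u64, u64),
    till: Option<(u64, u64, u64)>,
}

/// Parse "0.15.0" (or "v0.15.0") into a comparable tuple.
fn parse_version(s: &str) -> Option<(u64, u64, u64)> {
    let mut parts = s.trim_start_matches('v').splitn(3, '.');
    Some((
        parts.next()?.parse().ok()?,
        parts.next()?.parse().ok()?,
        parts.next()?.parse().ok()?,
    ))
}

impl VersionRange {
    /// A case applies to `version` when `since <= version < till`.
    fn applies_to(&self, version: &str) -> bool {
        let Some(v) = parse_version(version) else {
            return false;
        };
        v >= self.since && self.till.map_or(true, |till| v < till)
    }
}

fn main() {
    let range = VersionRange {
        since: parse_version("0.15.0").unwrap(),
        till: parse_version("0.99.0"),
    };
    assert!(range.applies_to("0.16.0"));
    assert!(!range.applies_to("0.14.1")); // before `since`
    assert!(!range.applies_to("0.99.0")); // deprecated from `till` on
}
```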

### IGNORE_RESULT

`IGNORE_RESULT` is a new interceptor that tells the runner to ignore the result of a query and only check whether the query executes successfully.

This is useful for reducing the maintenance burden of the test cases: unlike in the integration sqlness test, in most cases here we don't care about the result of a query, only that it executes successfully.
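
As a rough sketch of the intended behavior (the function below is hypothetical, not the actual sqlness interceptor API), the runner could collapse any successful output before writing the `.result` file, so only the success/failure status is pinned down:

```rust
/// Hypothetical post-processing for a statement annotated with
/// `-- SQLNESS IGNORE_RESULT`: keep failures verbatim so they still
/// show up in the diff, but collapse successful output to a fixed
/// marker so result changes alone never fail the test.
fn normalize_ignored_result(raw: Result<String, String>) -> String {
    match raw {
        Ok(_) => "OK (result ignored)".to_string(),
        Err(e) => format!("Error: {e}"),
    }
}

fn main() {
    assert_eq!(
        normalize_ignored_result(Ok("3 rows in set".to_string())),
        "OK (result ignored)"
    );
    assert_eq!(
        normalize_ignored_result(Err("table not found".to_string())),
        "Error: table not found"
    );
}
```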

### TEMPLATE

`TEMPLATE` is another new interceptor; it generates queries from a template based on runtime data.

In the example above, we need to run the `SHOW CREATE TABLE` query for every existing table, so we use the `TEMPLATE` interceptor to generate the query from a dynamic table list.
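
A minimal sketch of the expansion step, assuming the runner has first evaluated the data query (`SHOW TABLES` in the case above) into a list of values; `expand_template` and its signature are made up for illustration:

```rust
/// Hypothetically expand `-- SQLNESS TEMPLATE TABLE="SHOW TABLES";`:
/// the runner evaluates the data query, then instantiates the
/// templated statement once per returned value.
fn expand_template(template: &str, var: &str, values: &[String]) -> Vec<String> {
    values
        .iter()
        .map(|v| template.replace(&format!("${var}"), v))
        .collect()
}

fn main() {
    let tables = vec!["granularity_and_false_positive_rate".to_string()];
    let queries = expand_template("SHOW CREATE TABLE $TABLE;", "TABLE", &tables);
    assert_eq!(
        queries,
        ["SHOW CREATE TABLE granularity_and_false_positive_rate;"]
    );
}
```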

### RUNNER

There are also some extra requirements for the runner itself:

- It should run the test cases in sequence: first `1.feature/`, then `2.verify/`, and finally `3.cleanup/`.
- It should be able to fetch the required versions automatically to finish the test.
- It should handle `since` and `till` properly.

In the `1.feature` phase, the runner needs to identify, by version number, all the features that need to be tested. It then restarts with the new version (the `to` version) to run the `2.verify/` and `3.cleanup/` phases, as shown in the sketch below.
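
Everything in this sketch is an assumption about the future runner: the `Runner` type and the `start_db`/`run_dir` helpers are stand-ins for the real process management and sqlness invocations, shown only to pin down the ordering and the version switch between phases.

```rust
/// Hypothetical driver for one compatibility run between two versions.
struct Runner {
    from: String, // version that writes the data, e.g. "0.15.0"
    to: String,   // version that reads it back, e.g. "0.16.0"
}

impl Runner {
    fn run(&self) -> Result<(), String> {
        // Phase 1: start the `from` version and exercise new features,
        // skipping cases whose `since`/`till` range excludes `from`.
        let db = start_db(&self.from)?;
        run_dir(&db, "compatibility_test/1.feature")?;
        drop(db);

        // Phases 2 and 3: restart on the `to` version over the same
        // data directory, then verify behavior and clean up.
        let db = start_db(&self.to)?;
        run_dir(&db, "compatibility_test/2.verify")?;
        run_dir(&db, "compatibility_test/3.cleanup")?;
        Ok(())
    }
}

// Stubs standing in for real process management and sqlness calls.
struct Db(String);
fn start_db(version: &str) -> Result<Db, String> { Ok(Db(version.to_string())) }
fn run_dir(_db: &Db, _dir: &str) -> Result<(), String> { Ok(()) }

fn main() {
    Runner { from: "0.15.0".into(), to: "0.16.0".into() }.run().unwrap();
}
```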

## Test Report

Finally, we can run the compatibility test to verify the compatibility between any two given versions of GreptimeDB, for example:

```bash
# check backward compatibility between v0.15.0 and v0.16.0 when releasing v0.16.0
./sqlness run --from=0.15.0 --to=0.16.0

# check forward compatibility when downgrading from v0.15.0 to v0.13.0
./sqlness run --from=0.15.0 --to=0.13.0
```

We can also use a script to run the compatibility test over all the versions in a given range, producing a quick report that covers every version we need.

We always bump the version in `Cargo.toml` to the next major release version, so that version can be used as the "latest" unpublished version for scenarios like local testing.
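
For instance, a small helper along these lines could enumerate the pairs to run; the version list and command layout are assumptions for illustration:

```rust
// Hypothetical helper that prints every sqlness invocation needed to
// cover both upgrade and downgrade paths for a release candidate.
fn main() {
    let released = ["0.13.0", "0.14.0", "0.15.0"];
    let candidate = "0.16.0";
    for from in released {
        // Backward compatibility: data written by an old version,
        // read back by the release candidate.
        println!("./sqlness run --from={from} --to={candidate}");
        // Forward compatibility: data written by the candidate,
        // read back by the old version (downgrade path).
        println!("./sqlness run --from={candidate} --to={from}");
    }
}
```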

# Alternatives

There was a previous attempt to implement a compatibility test framework, which has since been disabled ([#3728](https://github.com/GreptimeTeam/greptimedb/issues/3728)).
@@ -21,14 +21,14 @@
# Resources
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-datanode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{instance}}]-[{{ pod }}]` |
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-frontend"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-flownode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
# Frontend Requests
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
@@ -72,6 +72,7 @@
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Active series and field builder counts in the memtable | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
| Cache Miss | `sum by (instance,pod, type) (rate(greptime_mito_cache_miss{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | The local cache miss of the datanode. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
# OpenDAL
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |

@@ -180,13 +180,18 @@ groups:
- title: Datanode Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
unit: bytes
queries:
- expr: sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{ pod }}]'
- expr: max(greptime_memory_limit_in_bytes{app="greptime-datanode"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Datanode CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
@@ -197,16 +202,26 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Frontend Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
unit: bytes
queries:
- expr: sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- expr: max(greptime_memory_limit_in_bytes{app="greptime-frontend"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Frontend CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
@@ -217,16 +232,26 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]-cpu'
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Metasrv Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
unit: bytes
queries:
- expr: sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]-resident'
- expr: max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Metasrv CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
@@ -237,16 +262,26 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Flownode Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
unit: bytes
queries:
- expr: sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- expr: max(greptime_memory_limit_in_bytes{app="greptime-flownode"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Flownode CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
@@ -257,6 +292,11 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Frontend Requests
panels:
- title: HTTP QPS per Instance
@@ -642,6 +682,15 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- title: Cache Miss
type: timeseries
description: The local cache miss of the datanode.
queries:
- expr: sum by (instance,pod, type) (rate(greptime_mito_cache_miss{instance=~"$datanode"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
- title: OpenDAL
panels:
- title: QPS per Instance

@@ -21,14 +21,14 @@
# Resources
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-datanode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{instance}}]-[{{ pod }}]` |
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-frontend"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-flownode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
# Frontend Requests
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
@@ -72,6 +72,7 @@
| Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Active series and field builder counts in the memtable | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
| Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
| Cache Miss | `sum by (instance,pod, type) (rate(greptime_mito_cache_miss{}[$__rate_interval]))` | `timeseries` | The local cache miss of the datanode. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
# OpenDAL
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |

@@ -180,13 +180,18 @@ groups:
- title: Datanode Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
unit: bytes
queries:
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{ pod }}]'
- expr: max(greptime_memory_limit_in_bytes{app="greptime-datanode"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Datanode CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
@@ -197,16 +202,26 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Frontend Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
unit: bytes
queries:
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- expr: max(greptime_memory_limit_in_bytes{app="greptime-frontend"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Frontend CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
@@ -217,16 +232,26 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]-cpu'
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Metasrv Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
unit: bytes
queries:
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]-resident'
- expr: max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Metasrv CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
@@ -237,16 +262,26 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Flownode Memory per Instance
type: timeseries
description: Current memory usage by instance
unit: decbytes
unit: bytes
queries:
- expr: sum(process_resident_memory_bytes{}) by (instance, pod)
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- expr: max(greptime_memory_limit_in_bytes{app="greptime-flownode"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Flownode CPU Usage per Instance
type: timeseries
description: Current cpu usage by instance
@@ -257,6 +292,11 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{ instance }}]-[{{ pod }}]'
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})
datasource:
type: prometheus
uid: ${metrics}
legendFormat: limit
- title: Frontend Requests
panels:
- title: HTTP QPS per Instance
@@ -642,6 +682,15 @@ groups:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
- title: Cache Miss
type: timeseries
description: The local cache miss of the datanode.
queries:
- expr: sum by (instance,pod, type) (rate(greptime_mito_cache_miss{}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{type}}]'
- title: OpenDAL
panels:
- title: QPS per Instance

@@ -26,7 +26,7 @@ check_dashboards_generation() {
./grafana/scripts/gen-dashboards.sh

if [[ -n "$(git diff --name-only grafana/dashboards/metrics)" ]]; then
echo "Error: The dashboards are not generated correctly. You should execute the `make dashboards` command."
echo "Error: The dashboards are not generated correctly. You should execute the 'make dashboards' command."
exit 1
fi
}

@@ -17,6 +17,7 @@ use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use common_time::timestamp::TimeUnit;
use datatypes::prelude::ConcreteDataType;
use snafu::prelude::*;
use snafu::Location;
@@ -66,12 +67,28 @@
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Invalid time unit: {time_unit}"))]
InvalidTimeUnit {
time_unit: i32,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Inconsistent time unit: {:?}", units))]
InconsistentTimeUnit {
units: Vec<TimeUnit>,
#[snafu(implicit)]
location: Location,
},
}

impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
Error::UnknownColumnDataType { .. }
| Error::InvalidTimeUnit { .. }
| Error::InconsistentTimeUnit { .. } => StatusCode::InvalidArguments,
Error::IntoColumnDataType { .. } | Error::SerializeJson { .. } => {
StatusCode::Unexpected
}

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashSet;
use std::sync::Arc;

use common_base::BitVec;
@@ -46,7 +47,7 @@ use greptime_proto::v1::{
use paste::paste;
use snafu::prelude::*;

use crate::error::{self, Result};
use crate::error::{self, InconsistentTimeUnitSnafu, InvalidTimeUnitSnafu, Result};
use crate::v1::column::Values;
use crate::v1::{Column, ColumnDataType, Value as GrpcValue};

@@ -1079,6 +1080,89 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
}
}

pub fn from_pb_time_unit(unit: v1::TimeUnit) -> TimeUnit {
match unit {
v1::TimeUnit::Second => TimeUnit::Second,
v1::TimeUnit::Millisecond => TimeUnit::Millisecond,
v1::TimeUnit::Microsecond => TimeUnit::Microsecond,
v1::TimeUnit::Nanosecond => TimeUnit::Nanosecond,
}
}

pub fn to_pb_time_unit(unit: TimeUnit) -> v1::TimeUnit {
match unit {
TimeUnit::Second => v1::TimeUnit::Second,
TimeUnit::Millisecond => v1::TimeUnit::Millisecond,
TimeUnit::Microsecond => v1::TimeUnit::Microsecond,
TimeUnit::Nanosecond => v1::TimeUnit::Nanosecond,
}
}

pub fn from_pb_time_ranges(time_ranges: v1::TimeRanges) -> Result<Vec<(Timestamp, Timestamp)>> {
if time_ranges.time_ranges.is_empty() {
return Ok(vec![]);
}
let proto_time_unit = v1::TimeUnit::try_from(time_ranges.time_unit).map_err(|_| {
InvalidTimeUnitSnafu {
time_unit: time_ranges.time_unit,
}
.build()
})?;
let time_unit = from_pb_time_unit(proto_time_unit);
Ok(time_ranges
.time_ranges
.into_iter()
.map(|r| {
(
Timestamp::new(r.start, time_unit),
Timestamp::new(r.end, time_unit),
)
})
.collect())
}

/// All time_ranges must be of the same time unit.
///
/// If the input `time_ranges` is empty, it returns a default `TimeRanges` with `Millisecond` as the time unit.
pub fn to_pb_time_ranges(time_ranges: &[(Timestamp, Timestamp)]) -> Result<v1::TimeRanges> {
let is_same_time_unit = time_ranges.windows(2).all(|x| {
x[0].0.unit() == x[1].0.unit()
&& x[0].1.unit() == x[1].1.unit()
&& x[0].0.unit() == x[0].1.unit()
});

if !is_same_time_unit {
let all_time_units: Vec<_> = time_ranges
.iter()
.map(|(s, e)| [s.unit(), e.unit()])
.clone()
.flatten()
.collect::<HashSet<_>>()
.into_iter()
.collect();
InconsistentTimeUnitSnafu {
units: all_time_units,
}
.fail()?
}

let mut pb_time_ranges = v1::TimeRanges {
// default time unit is Millisecond
time_unit: v1::TimeUnit::Millisecond as i32,
time_ranges: Vec::with_capacity(time_ranges.len()),
};
if let Some((start, _end)) = time_ranges.first() {
pb_time_ranges.time_unit = to_pb_time_unit(start.unit()) as i32;
}
for (start, end) in time_ranges {
pb_time_ranges.time_ranges.push(v1::TimeRange {
start: start.value(),
end: end.value(),
});
}
Ok(pb_time_ranges)
}

#[cfg(test)]
mod tests {
use std::sync::Arc;

@@ -44,6 +44,8 @@ moka = { workspace = true, features = ["future", "sync"] }
partition.workspace = true
paste.workspace = true
prometheus.workspace = true
promql-parser.workspace = true
rand.workspace = true
rustc-hash.workspace = true
serde_json.workspace = true
session.workspace = true

@@ -16,8 +16,8 @@ use api::v1::meta::ProcedureStatus;
use common_error::ext::BoxedError;
use common_meta::cluster::{ClusterInfo, NodeInfo};
use common_meta::datanode::RegionStat;
use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::procedure_executor::{ExecutorContext, ProcedureExecutor};
use common_meta::rpc::procedure;
use common_procedure::{ProcedureInfo, ProcedureState};
use meta_client::MetaClientRef;

@@ -23,7 +23,8 @@ use common_catalog::consts::{
};
use common_error::ext::BoxedError;
use common_meta::cache::{
LayeredCacheRegistryRef, TableRoute, TableRouteCacheRef, ViewInfoCacheRef,
LayeredCacheRegistryRef, TableInfoCacheRef, TableNameCacheRef, TableRoute, TableRouteCacheRef,
ViewInfoCacheRef,
};
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::flow::FlowMetadataManager;
@@ -41,7 +42,7 @@ use session::context::{Channel, QueryContext};
use snafu::prelude::*;
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
use table::dist_table::DistTable;
use table::metadata::TableId;
use table::metadata::{TableId, TableInfoRef};
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::table_name::TableName;
use table::TableRef;
@@ -325,6 +326,63 @@ impl CatalogManager for KvBackendCatalogManager {
Ok(None)
}

async fn table_id(
&self,
catalog_name: &str,
schema_name: &str,
table_name: &str,
query_ctx: Option<&QueryContext>,
) -> Result<Option<TableId>> {
let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
if let Some(table) =
self.system_catalog
.table(catalog_name, schema_name, table_name, query_ctx)
{
return Ok(Some(table.table_info().table_id()));
}

let table_cache: TableNameCacheRef =
self.cache_registry.get().context(CacheNotFoundSnafu {
name: "table_name_cache",
})?;

let table = table_cache
.get_by_ref(&TableName {
catalog_name: catalog_name.to_string(),
schema_name: schema_name.to_string(),
table_name: table_name.to_string(),
})
.await
.context(GetTableCacheSnafu)?;

if let Some(table) = table {
return Ok(Some(table));
}

if channel == Channel::Postgres {
// fall back to pg_catalog
if let Some(table) =
self.system_catalog
.table(catalog_name, PG_CATALOG_NAME, table_name, query_ctx)
{
return Ok(Some(table.table_info().table_id()));
}
}

Ok(None)
}

async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>> {
let table_info_cache: TableInfoCacheRef =
self.cache_registry.get().context(CacheNotFoundSnafu {
name: "table_info_cache",
})?;
table_info_cache
.get_by_ref(&table_id)
.await
.context(GetTableCacheSnafu)
}

async fn tables_by_ids(
&self,
catalog: &str,

@@ -25,7 +25,7 @@ use common_catalog::consts::{INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME};
use futures::future::BoxFuture;
use futures_util::stream::BoxStream;
use session::context::QueryContext;
use table::metadata::TableId;
use table::metadata::{TableId, TableInfoRef};
use table::TableRef;

use crate::error::Result;
@@ -89,6 +89,23 @@ pub trait CatalogManager: Send + Sync {
query_ctx: Option<&QueryContext>,
) -> Result<Option<TableRef>>;

/// Returns the table id of the provided table ident.
async fn table_id(
&self,
catalog: &str,
schema: &str,
table_name: &str,
query_ctx: Option<&QueryContext>,
) -> Result<Option<TableId>> {
Ok(self
.table(catalog, schema, table_name, query_ctx)
.await?
.map(|t| t.table_info().ident.table_id))
}

/// Returns the table info of the provided table id.
async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>>;

/// Returns the tables by table ids.
async fn tables_by_ids(
&self,

@@ -28,7 +28,7 @@ use common_meta::kv_backend::memory::MemoryKvBackend;
use futures_util::stream::BoxStream;
use session::context::QueryContext;
use snafu::OptionExt;
use table::metadata::TableId;
use table::metadata::{TableId, TableInfoRef};
use table::TableRef;

use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
@@ -144,6 +144,18 @@ impl CatalogManager for MemoryCatalogManager {
Ok(result)
}

async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>> {
Ok(self
.catalogs
.read()
.unwrap()
.iter()
.flat_map(|(_, schema_entries)| schema_entries.values())
.flat_map(|tables| tables.values())
.find(|t| t.table_info().ident.table_id == table_id)
.map(|t| t.table_info()))
}

async fn tables_by_ids(
&self,
catalog: &str,

@@ -14,17 +14,24 @@

use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fmt::{Debug, Formatter};
use std::fmt::{Debug, Display, Formatter};
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant, UNIX_EPOCH};

use api::v1::frontend::{KillProcessRequest, ListProcessRequest, ProcessInfo};
use common_base::cancellation::CancellationHandle;
use common_frontend::selector::{FrontendSelector, MetaClientSelector};
use common_telemetry::{debug, info, warn};
use common_frontend::slow_query_event::SlowQueryEvent;
use common_telemetry::{debug, error, info, warn};
use common_time::util::current_time_millis;
use meta_client::MetaClientRef;
use promql_parser::parser::EvalStmt;
use rand::random;
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use sql::statements::statement::Statement;
use tokio::sync::mpsc::Sender;

use crate::error;
use crate::metrics::{PROCESS_KILL_COUNT, PROCESS_LIST_COUNT};
@@ -44,6 +51,23 @@ pub struct ProcessManager {
frontend_selector: Option<MetaClientSelector>,
}

/// Represents a parsed query statement, functionally equivalent to [query::parser::QueryStatement].
/// This enum is defined here to avoid cyclic dependencies with the query parser module.
#[derive(Debug, Clone)]
pub enum QueryStatement {
Sql(Statement),
Promql(EvalStmt),
}

impl Display for QueryStatement {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
QueryStatement::Sql(stmt) => write!(f, "{}", stmt),
QueryStatement::Promql(eval_stmt) => write!(f, "{}", eval_stmt),
}
}
}

impl ProcessManager {
/// Create a [ProcessManager] instance with server address and kv client.
pub fn new(server_addr: String, meta_client: Option<MetaClientRef>) -> Self {
@@ -67,6 +91,7 @@ impl ProcessManager {
query: String,
client: String,
query_id: Option<ProcessId>,
_slow_query_timer: Option<SlowQueryTimer>,
) -> Ticket {
let id = query_id.unwrap_or_else(|| self.next_id.fetch_add(1, Ordering::Relaxed));
let process = ProcessInfo {
@@ -93,6 +118,7 @@ impl ProcessManager {
manager: self.clone(),
id,
cancellation_handle,
_slow_query_timer,
}
}

@@ -223,6 +249,7 @@ pub struct Ticket {
pub(crate) manager: ProcessManagerRef,
pub(crate) id: ProcessId,
pub cancellation_handle: Arc<CancellationHandle>,
_slow_query_timer: Option<SlowQueryTimer>,
}

impl Drop for Ticket {
@@ -263,6 +290,107 @@ impl Debug for CancellableProcess {
}
}

/// SlowQueryTimer is used to log slow query when it's dropped.
/// In drop(), it will check if the query is slow and send the slow query event to the handler.
pub struct SlowQueryTimer {
start: Instant,
stmt: QueryStatement,
query_ctx: QueryContextRef,
threshold: Option<Duration>,
sample_ratio: Option<f64>,
tx: Sender<SlowQueryEvent>,
}

impl SlowQueryTimer {
pub fn new(
stmt: QueryStatement,
query_ctx: QueryContextRef,
threshold: Option<Duration>,
sample_ratio: Option<f64>,
tx: Sender<SlowQueryEvent>,
) -> Self {
Self {
start: Instant::now(),
stmt,
query_ctx,
threshold,
sample_ratio,
tx,
}
}
}

impl SlowQueryTimer {
fn send_slow_query_event(&self, elapsed: Duration, threshold: Duration) {
let mut slow_query_event = SlowQueryEvent {
cost: elapsed.as_millis() as u64,
threshold: threshold.as_millis() as u64,
query: "".to_string(),
query_ctx: self.query_ctx.clone(),

// The following fields are only used for PromQL queries.
is_promql: false,
promql_range: None,
promql_step: None,
promql_start: None,
promql_end: None,
};

match &self.stmt {
QueryStatement::Promql(stmt) => {
slow_query_event.is_promql = true;
slow_query_event.query = stmt.expr.to_string();
slow_query_event.promql_step = Some(stmt.interval.as_millis() as u64);

let start = stmt
.start
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_millis() as i64;

let end = stmt
.end
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_millis() as i64;

slow_query_event.promql_range = Some((end - start) as u64);
slow_query_event.promql_start = Some(start);
slow_query_event.promql_end = Some(end);
}
QueryStatement::Sql(stmt) => {
slow_query_event.query = stmt.to_string();
}
}

// Send SlowQueryEvent to the handler.
if let Err(e) = self.tx.try_send(slow_query_event) {
error!(e; "Failed to send slow query event");
}
}
}

impl Drop for SlowQueryTimer {
fn drop(&mut self) {
if let Some(threshold) = self.threshold {
// Calculate the elapsed duration since the timer was created.
let elapsed = self.start.elapsed();
if elapsed > threshold {
if let Some(ratio) = self.sample_ratio {
// Only capture a portion of slow queries based on sample_ratio.
// Generate a random number in [0, 1) and compare it with sample_ratio.
if ratio >= 1.0 || random::<f64>() <= ratio {
self.send_slow_query_event(elapsed, threshold);
}
} else {
// Captures all slow queries if sample_ratio is not set.
self.send_slow_query_event(elapsed, threshold);
}
}
}
}
}

#[cfg(test)]
mod tests {
use std::sync::Arc;
@@ -278,6 +406,7 @@ mod tests {
"SELECT * FROM table".to_string(),
"".to_string(),
None,
None,
);

let running_processes = process_manager.local_processes(None).unwrap();
@@ -301,6 +430,7 @@ mod tests {
"SELECT * FROM table".to_string(),
"client1".to_string(),
Some(custom_id),
None,
);

assert_eq!(ticket.id, custom_id);
@@ -321,6 +451,7 @@ mod tests {
"SELECT * FROM table1".to_string(),
"client1".to_string(),
None,
None,
);

let ticket2 = process_manager.clone().register_query(
@@ -329,6 +460,7 @@ mod tests {
"SELECT * FROM table2".to_string(),
"client2".to_string(),
None,
None,
);

let running_processes = process_manager.local_processes(Some("public")).unwrap();
@@ -350,6 +482,7 @@ mod tests {
"SELECT * FROM table1".to_string(),
"client1".to_string(),
None,
None,
);

let _ticket2 = process_manager.clone().register_query(
@@ -358,6 +491,7 @@ mod tests {
"SELECT * FROM table2".to_string(),
"client2".to_string(),
None,
None,
);

// Test listing processes for specific catalog
@@ -384,6 +518,7 @@ mod tests {
"SELECT * FROM table".to_string(),
"client1".to_string(),
None,
None,
);
assert_eq!(process_manager.local_processes(None).unwrap().len(), 1);
process_manager.deregister_query("public".to_string(), ticket.id);
@@ -400,6 +535,7 @@ mod tests {
"SELECT * FROM table".to_string(),
"client1".to_string(),
None,
None,
);

assert!(!ticket.cancellation_handle.is_cancelled());
@@ -417,6 +553,7 @@ mod tests {
"SELECT * FROM table".to_string(),
"client1".to_string(),
None,
None,
);
assert!(!ticket.cancellation_handle.is_cancelled());
let killed = process_manager
@@ -462,6 +599,7 @@ mod tests {
"SELECT COUNT(*) FROM users WHERE age > 18".to_string(),
"test_client".to_string(),
Some(42),
None,
);

let processes = process_manager.local_processes(None).unwrap();
@@ -488,6 +626,7 @@ mod tests {
"SELECT * FROM table".to_string(),
"client1".to_string(),
None,
None,
);

// Process should be registered

@@ -75,7 +75,7 @@ impl StoreConfig {
#[cfg(feature = "pg_kvbackend")]
BackendImpl::PostgresStore => {
let table_name = &self.meta_table_name;
let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs)
let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs, None)
.await
.map_err(BoxedError::new)?;
Ok(common_meta::kv_backend::rds::PgStore::with_pg_pool(

@@ -38,6 +38,7 @@ common-config.workspace = true
common-error.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-mem-prof.workspace = true
common-meta.workspace = true
common-options.workspace = true
common-procedure.workspace = true

@@ -28,7 +28,7 @@ use tracing_appender::non_blocking::WorkerGuard;

use crate::datanode::{DatanodeOptions, Instance, APP_NAME};
use crate::error::{MetaClientInitSnafu, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::{create_resource_limit_metrics, log_versions};
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};

/// Builder for Datanode instance.
pub struct InstanceBuilder {
@@ -68,6 +68,7 @@ impl InstanceBuilder {
);

log_versions(verbose_version(), short_version(), APP_NAME);
maybe_activate_heap_profile(&dn_opts.memory);
create_resource_limit_metrics(APP_NAME);

plugins::setup_datanode_plugins(plugins, &opts.plugins, dn_opts)

@@ -46,7 +46,7 @@ use crate::error::{
MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{create_resource_limit_metrics, log_versions, App};
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};

pub const APP_NAME: &str = "greptime-flownode";

@@ -280,6 +280,7 @@ impl StartCommand {
);

log_versions(verbose_version(), short_version(), APP_NAME);
maybe_activate_heap_profile(&opts.component.memory);
create_resource_limit_metrics(APP_NAME);

info!("Flownode start command: {:#?}", self);

@@ -47,7 +47,7 @@ use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{self, Result};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{create_resource_limit_metrics, log_versions, App};
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};

type FrontendOptions = GreptimeOptions<frontend::frontend::FrontendOptions>;

@@ -283,6 +283,7 @@ impl StartCommand {
);

log_versions(verbose_version(), short_version(), APP_NAME);
maybe_activate_heap_profile(&opts.component.memory);
create_resource_limit_metrics(APP_NAME);

info!("Frontend start command: {:#?}", self);

@@ -15,7 +15,10 @@
#![feature(assert_matches, let_chains)]

use async_trait::async_trait;
use common_telemetry::{error, info};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_mem_prof::activate_heap_profile;
use common_telemetry::{error, info, warn};
use stat::{get_cpu_limit, get_memory_limit};

use crate::error::Result;
@@ -145,3 +148,20 @@ fn log_env_flags() {
info!("argument: {}", argument);
}
}

pub fn maybe_activate_heap_profile(memory_options: &common_options::memory::MemoryOptions) {
if memory_options.enable_heap_profiling {
match activate_heap_profile() {
Ok(()) => {
info!("Heap profile is active");
}
Err(err) => {
if err.status_code() == StatusCode::Unsupported {
info!("Heap profile is not supported");
} else {
warn!(err; "Failed to activate heap profile");
}
}
}
}
}

@@ -30,7 +30,7 @@ use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{create_resource_limit_metrics, log_versions, App};
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};

type MetasrvOptions = GreptimeOptions<meta_srv::metasrv::MetasrvOptions>;

@@ -325,6 +325,7 @@ impl StartCommand {
);

log_versions(verbose_version(), short_version(), APP_NAME);
maybe_activate_heap_profile(&opts.component.memory);
create_resource_limit_metrics(APP_NAME);

info!("Metasrv start command: {:#?}", self);

@@ -34,17 +34,19 @@ use common_meta::cluster::{NodeInfo, NodeStatus};
use common_meta::datanode::RegionStat;
use common_meta::ddl::flow_meta::FlowMetadataAllocator;
use common_meta::ddl::table_meta::TableMetadataAllocator;
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl, ProcedureExecutorRef};
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl};
use common_meta::ddl_manager::DdlManager;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::peer::Peer;
use common_meta::procedure_executor::LocalProcedureExecutor;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::region_registry::LeaderRegionRegistry;
use common_meta::sequence::SequenceBuilder;
use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef};
use common_options::memory::MemoryOptions;
use common_procedure::{ProcedureInfo, ProcedureManagerRef};
use common_telemetry::info;
use common_telemetry::logging::{
@@ -83,7 +85,7 @@ use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{Result, StartFlownodeSnafu};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{create_resource_limit_metrics, error, log_versions, App};
use crate::{create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile, App};

pub const APP_NAME: &str = "greptime-standalone";

@@ -157,6 +159,7 @@ pub struct StandaloneOptions {
pub max_in_flight_write_bytes: Option<ReadableSize>,
pub slow_query: Option<SlowQueryOptions>,
pub query: QueryOptions,
pub memory: MemoryOptions,
}

impl Default for StandaloneOptions {
@@ -190,6 +193,7 @@ impl Default for StandaloneOptions {
max_in_flight_write_bytes: None,
slow_query: Some(SlowQueryOptions::default()),
query: QueryOptions::default(),
memory: MemoryOptions::default(),
}
}
}
@@ -486,6 +490,7 @@ impl StartCommand {
);

log_versions(verbose_version(), short_version(), APP_NAME);
maybe_activate_heap_profile(&opts.component.memory);
create_resource_limit_metrics(APP_NAME);

info!("Standalone start command: {:#?}", self);
@@ -636,9 +641,8 @@ impl StartCommand {
flow_metadata_allocator: flow_metadata_allocator.clone(),
region_failure_detector_controller: Arc::new(NoopRegionFailureDetectorControl),
};
let procedure_manager_c = procedure_manager.clone();

let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager_c, true)
let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager.clone(), true)
.context(error::InitDdlManagerSnafu)?;
#[cfg(feature = "enterprise")]
let ddl_manager = {
@@ -646,7 +650,11 @@ impl StartCommand {
plugins.get();
ddl_manager.with_trigger_ddl_manager(trigger_ddl_manager)
};
let ddl_task_executor: ProcedureExecutorRef = Arc::new(ddl_manager);

let procedure_executor = Arc::new(LocalProcedureExecutor::new(
Arc::new(ddl_manager),
procedure_manager.clone(),
));

let fe_instance = FrontendBuilder::new(
fe_opts.clone(),
@@ -654,7 +662,7 @@ impl StartCommand {
layered_cache_registry.clone(),
catalog_manager.clone(),
node_manager.clone(),
ddl_task_executor.clone(),
procedure_executor.clone(),
process_manager,
)
.with_plugin(plugins.clone())
@@ -679,7 +687,7 @@ impl StartCommand {
catalog_manager.clone(),
kv_backend.clone(),
layered_cache_registry.clone(),
ddl_task_executor.clone(),
procedure_executor,
node_manager,
)
.await

@@ -34,6 +34,7 @@ use query::options::QueryOptions;
use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use store_api::path_utils::WAL_DIR;

#[allow(deprecated)]
@@ -190,6 +191,13 @@ fn test_load_metasrv_example_config() {
remote_write: Some(Default::default()),
..Default::default()
},
backend_tls: Some(TlsOption {
mode: TlsMode::Prefer,
cert_path: String::new(),
key_path: String::new(),
ca_cert_path: String::new(),
watch: false,
}),
..Default::default()
},
..Default::default()
@@ -245,6 +253,7 @@ fn test_load_flownode_example_config() {
..Default::default()
},
user_provider: None,
memory: Default::default(),
},
..Default::default()
};
@@ -298,6 +307,7 @@ fn test_load_standalone_example_config() {
cors_allowed_origins: vec!["https://example.com".to_string()],
..Default::default()
},

..Default::default()
},
..Default::default()

@@ -8,10 +8,8 @@ license.workspace = true
api.workspace = true
async-trait.workspace = true
backon.workspace = true
client.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
serde.workspace = true

@@ -13,7 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::ColumnSchema;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
@@ -35,6 +35,30 @@ pub enum Error {
|
||||
expected: Vec<ColumnSchema>,
|
||||
actual: Vec<ColumnSchema>,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to serialize event"))]
|
||||
SerializeEvent {
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to insert events"))]
|
||||
InsertEvents {
|
||||
// BoxedError is utilized here to prevent introducing a circular dependency that would arise from directly referencing `client::error::Error`.
|
||||
source: BoxedError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Keyvalue backend error"))]
|
||||
KvBackend {
|
||||
// BoxedError is utilized here to prevent introducing a circular dependency that would arise from directly referencing `common_meta::error::Error`.
|
||||
source: BoxedError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
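The two BoxedError comments above describe the same crate-boundary idiom; below is a minimal hedged sketch of it, where `insert_via_client` is a hypothetical helper standing in for a call into the `client` crate, and the `map_err(BoxedError::new)` plus context-selector chain mirrors the one used elsewhere in this diff:

    use snafu::ResultExt;

    fn record_events_sketch() -> Result<()> {
        // `insert_via_client` is hypothetical; it returns the client's own error type.
        insert_via_client()
            // Erase the concrete error type before it crosses the crate boundary.
            .map_err(BoxedError::new)
            // snafu derives `InsertEventsSnafu` from the `InsertEvents` variant above.
            .context(InsertEventsSnafu)?;
        Ok(())
    }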
@@ -42,8 +66,12 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::MismatchedSchema { .. } => StatusCode::InvalidArguments,
            Error::NoAvailableFrontend { .. } => StatusCode::Internal,
            Error::MismatchedSchema { .. } | Error::SerializeEvent { .. } => {
                StatusCode::InvalidArguments
            }
            Error::NoAvailableFrontend { .. }
            | Error::InsertEvents { .. }
            | Error::KvBackend { .. } => StatusCode::Internal,
        }
    }
@@ -81,7 +81,9 @@ pub trait Event: Send + Sync + Debug {
    }

    /// Returns the JSON payload of the event as a string. The payload is stored using the JSON type.
    fn json_payload(&self) -> Result<String>;
    fn json_payload(&self) -> Result<String> {
        Ok("".to_string())
    }

    /// Returns the extra schema to append to the event's default schema.
    fn extra_schema(&self) -> Vec<ColumnSchema> {
@@ -97,6 +99,14 @@ pub trait Event: Send + Sync + Debug {
    fn as_any(&self) -> &dyn Any;
}

/// The Eventable trait defines the interface for objects that can be converted to [Event].
pub trait Eventable: Send + Sync + Debug {
    /// Converts the object to an [Event].
    fn to_event(&self) -> Option<Box<dyn Event>> {
        None
    }
}
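Because `to_event` ships a default body, a type can opt out of event conversion with an empty impl; a minimal sketch (the `NoopSource` type is illustrative, not from this diff):

    #[derive(Debug)]
    struct NoopSource;

    // Relies on the default `to_event`, which returns `None`.
    impl Eventable for NoopSource {}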

/// Returns the hints for the insert operation.
pub fn insert_hints() -> Vec<(&'static str, &'static str)> {
    vec![
@@ -199,7 +209,7 @@ fn validate_events(events: &[&Box<dyn Event>]) -> Result<()> {
}

/// The EventRecorder trait defines the interface for recording events.
pub trait EventRecorder: Send + Sync + 'static {
pub trait EventRecorder: Send + Sync + Debug + 'static {
    /// Records an event for persistence and processing by [EventHandler].
    fn record(&self, event: Box<dyn Event>);

@@ -231,6 +241,7 @@ impl Default for EventRecorderOptions {
}

/// Implementation of [EventRecorder] that records the events and processes them in the background by the [EventHandler].
#[derive(Debug)]
pub struct EventRecorderImpl {
    // The channel to send the events to the background processor.
    tx: Sender<Box<dyn Event>>,

@@ -12,6 +12,7 @@ common-macro.workspace = true
common-meta.workspace = true
greptime-proto.workspace = true
meta-client.workspace = true
session.workspace = true
snafu.workspace = true
tonic.workspace = true

@@ -19,6 +19,7 @@ use snafu::OptionExt;

pub mod error;
pub mod selector;
pub mod slow_query_event;

#[derive(Debug, Clone, Eq, PartialEq)]
pub struct DisplayProcessId {

@@ -12,16 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//! Format to store in parquet.
//!
//! We store two additional internal columns at the end:
//! - `__sequence`, the sequence number of a row. Type: uint64
//! - `__op_type`, the op type of the row. Type: uint8
//!
//! We store other columns in the same order as [RegionMetadata::field_columns()](store_api::metadata::RegionMetadata::field_columns()).
//!
use session::context::QueryContextRef;

/// Number of columns that have fixed positions.
///
/// Contains all internal columns.
pub(crate) const PLAIN_FIXED_POS_COLUMN_NUM: usize = 2;
#[derive(Debug)]
pub struct SlowQueryEvent {
    pub cost: u64,
    pub threshold: u64,
    pub query: String,
    pub is_promql: bool,
    pub query_ctx: QueryContextRef,
    pub promql_range: Option<u64>,
    pub promql_step: Option<u64>,
    pub promql_start: Option<i64>,
    pub promql_end: Option<i64>,
}
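A hedged sketch of filling the struct for a slow PromQL query; the field names come from this diff, while the literal values, the millisecond units, and the `query_ctx` handle are assumptions:

    let event = SlowQueryEvent {
        cost: 1_500,      // elapsed time, assumed to be milliseconds
        threshold: 1_000, // configured slow-query threshold
        query: "rate(http_requests_total[5m])".to_string(),
        is_promql: true,
        query_ctx,        // an existing QueryContextRef
        promql_range: Some(300_000),
        promql_step: Some(15_000),
        promql_start: Some(1_700_000_000_000),
        promql_end: Some(1_700_000_300_000),
    };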
@@ -66,6 +66,6 @@ wkt = { version = "0.11", optional = true }
[dev-dependencies]
approx = "0.5"
futures.workspace = true
pretty_assertions = "1.4.0"
pretty_assertions.workspace = true
serde = { version = "1.0", features = ["derive"] }
tokio.workspace = true

@@ -16,6 +16,9 @@ mod add_region_follower;
mod flush_compact_region;
mod flush_compact_table;
mod migrate_region;
mod reconcile_catalog;
mod reconcile_database;
mod reconcile_table;
mod remove_region_follower;

use std::sync::Arc;
@@ -24,6 +27,9 @@ use add_region_follower::AddRegionFollowerFunction;
use flush_compact_region::{CompactRegionFunction, FlushRegionFunction};
use flush_compact_table::{CompactTableFunction, FlushTableFunction};
use migrate_region::MigrateRegionFunction;
use reconcile_catalog::ReconcileCatalogFunction;
use reconcile_database::ReconcileDatabaseFunction;
use reconcile_table::ReconcileTableFunction;
use remove_region_follower::RemoveRegionFollowerFunction;

use crate::flush_flow::FlushFlowFunction;
@@ -43,5 +49,8 @@ impl AdminFunction {
        registry.register_async(Arc::new(FlushTableFunction));
        registry.register_async(Arc::new(CompactTableFunction));
        registry.register_async(Arc::new(FlushFlowFunction));
        registry.register_async(Arc::new(ReconcileCatalogFunction));
        registry.register_async(Arc::new(ReconcileDatabaseFunction));
        registry.register_async(Arc::new(ReconcileTableFunction));
    }
}

src/common/function/src/admin/reconcile_catalog.rs (new file, 179 lines)
@@ -0,0 +1,179 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::meta::reconcile_request::Target;
use api::v1::meta::{ReconcileCatalog, ReconcileRequest};
use common_macro::admin_fn;
use common_query::error::{
    InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result,
    UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use common_telemetry::info;
use datatypes::prelude::*;
use session::context::QueryContextRef;

use crate::handlers::ProcedureServiceHandlerRef;
use crate::helper::{
    cast_u32, default_parallelism, default_resolve_strategy, get_string_from_params,
    parse_resolve_strategy,
};

const FN_NAME: &str = "reconcile_catalog";

/// A function to reconcile a catalog.
/// Returns the procedure id on success.
///
/// - `reconcile_catalog()`.
/// - `reconcile_catalog(resolve_strategy)`.
/// - `reconcile_catalog(resolve_strategy, parallelism)`.
#[admin_fn(
    name = ReconcileCatalogFunction,
    display_name = reconcile_catalog,
    sig_fn = signature,
    ret = string
)]
pub(crate) async fn reconcile_catalog(
    procedure_service_handler: &ProcedureServiceHandlerRef,
    query_ctx: &QueryContextRef,
    params: &[ValueRef<'_>],
) -> Result<Value> {
    let (resolve_strategy, parallelism) = match params.len() {
        0 => (default_resolve_strategy(), default_parallelism()),
        1 => (
            parse_resolve_strategy(get_string_from_params(params, 0, FN_NAME)?)?,
            default_parallelism(),
        ),
        2 => {
            let Some(parallelism) = cast_u32(&params[1])? else {
                return UnsupportedInputDataTypeSnafu {
                    function: FN_NAME,
                    datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
                }
                .fail();
            };
            (
                parse_resolve_strategy(get_string_from_params(params, 0, FN_NAME)?)?,
                parallelism,
            )
        }
        size => {
            return InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 0, 1 or 2, have: {}",
                    size
                ),
            }
            .fail();
        }
    };
    info!(
        "Reconciling catalog with resolve_strategy: {:?}, parallelism: {}",
        resolve_strategy, parallelism
    );
    let pid = procedure_service_handler
        .reconcile(ReconcileRequest {
            target: Some(Target::ReconcileCatalog(ReconcileCatalog {
                catalog_name: query_ctx.current_catalog().to_string(),
                parallelism,
                resolve_strategy: resolve_strategy as i32,
            })),
            ..Default::default()
        })
        .await?;
    match pid {
        Some(pid) => Ok(Value::from(pid)),
        None => Ok(Value::Null),
    }
}

fn signature() -> Signature {
    let nums = ConcreteDataType::numerics();
    let mut signs = Vec::with_capacity(2 + nums.len());
    signs.extend([
        // reconcile_catalog()
        TypeSignature::NullAry,
        // reconcile_catalog(resolve_strategy)
        TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
    ]);
    for sign in nums {
        // reconcile_catalog(resolve_strategy, parallelism)
        signs.push(TypeSignature::Exact(vec![
            ConcreteDataType::string_datatype(),
            sign,
        ]));
    }
    Signature::one_of(signs, Volatility::Immutable)
}

#[cfg(test)]
mod tests {
    use std::assert_matches::assert_matches;
    use std::sync::Arc;

    use common_query::error::Error;
    use datatypes::vectors::{StringVector, UInt64Vector, VectorRef};

    use crate::admin::reconcile_catalog::ReconcileCatalogFunction;
    use crate::function::{AsyncFunction, FunctionContext};

    #[tokio::test]
    async fn test_reconcile_catalog() {
        common_telemetry::init_default_ut_logging();

        // reconcile_catalog()
        let f = ReconcileCatalogFunction;
        let args = vec![];
        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
        assert_eq!(expect, result);

        // reconcile_catalog(resolve_strategy)
        let f = ReconcileCatalogFunction;
        let args = vec![Arc::new(StringVector::from(vec!["UseMetasrv"])) as _];
        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
        assert_eq!(expect, result);

        // reconcile_catalog(resolve_strategy, parallelism)
        let f = ReconcileCatalogFunction;
        let args = vec![
            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
            Arc::new(UInt64Vector::from_slice([10])) as _,
        ];
        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
        assert_eq!(expect, result);

        // unsupported input data type
        let f = ReconcileCatalogFunction;
        let args = vec![
            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
            Arc::new(StringVector::from(vec!["test"])) as _,
        ];
        let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
        assert_matches!(err, Error::UnsupportedInputDataType { .. });

        // invalid function args
        let f = ReconcileCatalogFunction;
        let args = vec![
            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
            Arc::new(UInt64Vector::from_slice([10])) as _,
            Arc::new(StringVector::from(vec!["10"])) as _,
        ];
        let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
        assert_matches!(err, Error::InvalidFuncArgs { .. });
    }
}

src/common/function/src/admin/reconcile_database.rs (new file, 198 lines)
@@ -0,0 +1,198 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::meta::reconcile_request::Target;
use api::v1::meta::{ReconcileDatabase, ReconcileRequest};
use common_macro::admin_fn;
use common_query::error::{
    InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result,
    UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use common_telemetry::info;
use datatypes::prelude::*;
use session::context::QueryContextRef;

use crate::handlers::ProcedureServiceHandlerRef;
use crate::helper::{
    cast_u32, default_parallelism, default_resolve_strategy, get_string_from_params,
    parse_resolve_strategy,
};

const FN_NAME: &str = "reconcile_database";

/// A function to reconcile a database.
/// Returns the procedure id on success.
///
/// - `reconcile_database(database_name)`.
/// - `reconcile_database(database_name, resolve_strategy)`.
/// - `reconcile_database(database_name, resolve_strategy, parallelism)`.
///
/// The parameters:
/// - `database_name`: the database name
#[admin_fn(
    name = ReconcileDatabaseFunction,
    display_name = reconcile_database,
    sig_fn = signature,
    ret = string
)]
pub(crate) async fn reconcile_database(
    procedure_service_handler: &ProcedureServiceHandlerRef,
    query_ctx: &QueryContextRef,
    params: &[ValueRef<'_>],
) -> Result<Value> {
    let (database_name, resolve_strategy, parallelism) = match params.len() {
        1 => (
            get_string_from_params(params, 0, FN_NAME)?,
            default_resolve_strategy(),
            default_parallelism(),
        ),
        2 => (
            get_string_from_params(params, 0, FN_NAME)?,
            parse_resolve_strategy(get_string_from_params(params, 1, FN_NAME)?)?,
            default_parallelism(),
        ),
        3 => {
            let Some(parallelism) = cast_u32(&params[2])? else {
                return UnsupportedInputDataTypeSnafu {
                    function: FN_NAME,
                    datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
                }
                .fail();
            };
            (
                get_string_from_params(params, 0, FN_NAME)?,
                parse_resolve_strategy(get_string_from_params(params, 1, FN_NAME)?)?,
                parallelism,
            )
        }
        size => {
            return InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 1, 2 or 3, have: {}",
                    size
                ),
            }
            .fail();
        }
    };
    info!(
        "Reconciling database: {}, resolve_strategy: {:?}, parallelism: {}",
        database_name, resolve_strategy, parallelism
    );
    let pid = procedure_service_handler
        .reconcile(ReconcileRequest {
            target: Some(Target::ReconcileDatabase(ReconcileDatabase {
                catalog_name: query_ctx.current_catalog().to_string(),
                database_name: database_name.to_string(),
                parallelism,
                resolve_strategy: resolve_strategy as i32,
            })),
            ..Default::default()
        })
        .await?;
    match pid {
        Some(pid) => Ok(Value::from(pid)),
        None => Ok(Value::Null),
    }
}

fn signature() -> Signature {
    let nums = ConcreteDataType::numerics();
    let mut signs = Vec::with_capacity(2 + nums.len());
    signs.extend([
        // reconcile_database(database_name)
        TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
        // reconcile_database(database_name, resolve_strategy)
        TypeSignature::Exact(vec![
            ConcreteDataType::string_datatype(),
            ConcreteDataType::string_datatype(),
        ]),
    ]);
    for sign in nums {
        // reconcile_database(database_name, resolve_strategy, parallelism)
        signs.push(TypeSignature::Exact(vec![
            ConcreteDataType::string_datatype(),
            ConcreteDataType::string_datatype(),
            sign,
        ]));
    }
    Signature::one_of(signs, Volatility::Immutable)
}

#[cfg(test)]
mod tests {
    use std::assert_matches::assert_matches;
    use std::sync::Arc;

    use common_query::error::Error;
    use datatypes::vectors::{StringVector, UInt32Vector, VectorRef};

    use crate::admin::reconcile_database::ReconcileDatabaseFunction;
    use crate::function::{AsyncFunction, FunctionContext};

    #[tokio::test]
    async fn test_reconcile_database() {
        common_telemetry::init_default_ut_logging();

        // reconcile_database(database_name)
        let f = ReconcileDatabaseFunction;
        let args = vec![Arc::new(StringVector::from(vec!["test"])) as _];
        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
        assert_eq!(expect, result);

        // reconcile_database(database_name, resolve_strategy)
        let f = ReconcileDatabaseFunction;
        let args = vec![
            Arc::new(StringVector::from(vec!["test"])) as _,
            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
        ];
        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
        assert_eq!(expect, result);

        // reconcile_database(database_name, resolve_strategy, parallelism)
        let f = ReconcileDatabaseFunction;
        let args = vec![
            Arc::new(StringVector::from(vec!["test"])) as _,
            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
            Arc::new(UInt32Vector::from_slice([10])) as _,
        ];
        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
        assert_eq!(expect, result);

        // invalid function args
        let f = ReconcileDatabaseFunction;
        let args = vec![
            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
            Arc::new(UInt32Vector::from_slice([10])) as _,
            Arc::new(StringVector::from(vec!["v1"])) as _,
            Arc::new(StringVector::from(vec!["v2"])) as _,
        ];
        let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
        assert_matches!(err, Error::InvalidFuncArgs { .. });

        // unsupported input data type
        let f = ReconcileDatabaseFunction;
        let args = vec![
            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
            Arc::new(UInt32Vector::from_slice([10])) as _,
            Arc::new(StringVector::from(vec!["v1"])) as _,
        ];
        let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
        assert_matches!(err, Error::UnsupportedInputDataType { .. });
    }
}

src/common/function/src/admin/reconcile_table.rs (new file, 149 lines)
@@ -0,0 +1,149 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::meta::reconcile_request::Target;
use api::v1::meta::{ReconcileRequest, ReconcileTable, ResolveStrategy};
use common_catalog::format_full_table_name;
use common_error::ext::BoxedError;
use common_macro::admin_fn;
use common_query::error::{
    MissingProcedureServiceHandlerSnafu, Result, TableMutationSnafu, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use common_telemetry::info;
use datatypes::prelude::*;
use session::context::QueryContextRef;
use session::table_name::table_name_to_full_name;
use snafu::ResultExt;

use crate::handlers::ProcedureServiceHandlerRef;
use crate::helper::parse_resolve_strategy;

const FN_NAME: &str = "reconcile_table";

/// A function to reconcile a table.
/// Returns the procedure id on success.
///
/// - `reconcile_table(table_name)`.
/// - `reconcile_table(table_name, resolve_strategy)`.
///
/// The parameters:
/// - `table_name`: the table name
#[admin_fn(
    name = ReconcileTableFunction,
    display_name = reconcile_table,
    sig_fn = signature,
    ret = string
)]
pub(crate) async fn reconcile_table(
    procedure_service_handler: &ProcedureServiceHandlerRef,
    query_ctx: &QueryContextRef,
    params: &[ValueRef<'_>],
) -> Result<Value> {
    let (table_name, resolve_strategy) = match params {
        [ValueRef::String(table_name)] => (table_name, ResolveStrategy::UseLatest),
        [ValueRef::String(table_name), ValueRef::String(resolve_strategy)] => {
            (table_name, parse_resolve_strategy(resolve_strategy)?)
        }
        _ => {
            return UnsupportedInputDataTypeSnafu {
                function: FN_NAME,
                datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
            }
            .fail()
        }
    };
    let (catalog_name, schema_name, table_name) = table_name_to_full_name(table_name, query_ctx)
        .map_err(BoxedError::new)
        .context(TableMutationSnafu)?;
    info!(
        "Reconciling table: {} with resolve_strategy: {:?}",
        format_full_table_name(&catalog_name, &schema_name, &table_name),
        resolve_strategy
    );
    let pid = procedure_service_handler
        .reconcile(ReconcileRequest {
            target: Some(Target::ReconcileTable(ReconcileTable {
                catalog_name,
                schema_name,
                table_name,
                resolve_strategy: resolve_strategy as i32,
            })),
            ..Default::default()
        })
        .await?;
    match pid {
        Some(pid) => Ok(Value::from(pid)),
        None => Ok(Value::Null),
    }
}

fn signature() -> Signature {
    Signature::one_of(
        vec![
            // reconcile_table(table_name)
            TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
            // reconcile_table(table_name, resolve_strategy)
            TypeSignature::Exact(vec![
                ConcreteDataType::string_datatype(),
                ConcreteDataType::string_datatype(),
            ]),
        ],
        Volatility::Immutable,
    )
}

#[cfg(test)]
mod tests {
    use std::assert_matches::assert_matches;
    use std::sync::Arc;

    use common_query::error::Error;
    use datatypes::vectors::{StringVector, VectorRef};

    use crate::admin::reconcile_table::ReconcileTableFunction;
    use crate::function::{AsyncFunction, FunctionContext};

    #[tokio::test]
    async fn test_reconcile_table() {
        common_telemetry::init_default_ut_logging();

        // reconcile_table(table_name)
        let f = ReconcileTableFunction;
        let args = vec![Arc::new(StringVector::from(vec!["test"])) as _];
        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
        assert_eq!(expect, result);

        // reconcile_table(table_name, resolve_strategy)
        let f = ReconcileTableFunction;
        let args = vec![
            Arc::new(StringVector::from(vec!["test"])) as _,
            Arc::new(StringVector::from(vec!["UseMetasrv"])) as _,
        ];
        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
        assert_eq!(expect, result);

        // unsupported input data type
        let f = ReconcileTableFunction;
        let args = vec![
            Arc::new(StringVector::from(vec!["test"])) as _,
            Arc::new(StringVector::from(vec!["UseMetasrv"])) as _,
            Arc::new(StringVector::from(vec!["10"])) as _,
        ];
        let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
        assert_matches!(err, Error::UnsupportedInputDataType { .. });
    }
}

@@ -26,6 +26,8 @@ use std::sync::Arc;

use arrow::array::StructArray;
use arrow_schema::Fields;
use common_telemetry::debug;
use datafusion::functions_aggregate::all_default_aggregate_functions;
use datafusion::optimizer::analyzer::type_coercion::TypeCoercion;
use datafusion::optimizer::AnalyzerRule;
use datafusion::physical_planner::create_aggregate_expr_and_maybe_filter;
@@ -39,6 +41,8 @@ use datafusion_expr::{
use datafusion_physical_expr::aggregate::AggregateFunctionExpr;
use datatypes::arrow::datatypes::{DataType, Field};

use crate::function_registry::FunctionRegistry;

/// Returns the name of the state function for the given aggregate function name.
/// The state function is used to compute the state of the aggregate function.
/// The state function's name is in the format `__<aggr_name>_state`.
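The naming convention above implies a mapping like the following (a hedged sketch; the actual helper in this file is not shown by the hunk):

    fn state_fn_name_sketch(aggr_name: &str) -> String {
        format!("__{aggr_name}_state") // e.g. "sum" becomes "__sum_state"
    }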
@@ -65,9 +69,9 @@ pub struct StateMergeHelper;
#[derive(Debug, Clone)]
pub struct StepAggrPlan {
    /// Upper merge plan, which is the aggregate plan that merges the states of the state function.
    pub upper_merge: Arc<LogicalPlan>,
    pub upper_merge: LogicalPlan,
    /// Lower state plan, which is the aggregate plan that computes the state of the aggregate function.
    pub lower_state: Arc<LogicalPlan>,
    pub lower_state: LogicalPlan,
}

pub fn get_aggr_func(expr: &Expr) -> Option<&datafusion_expr::expr::AggregateFunction> {
@@ -83,6 +87,36 @@ pub fn get_aggr_func(expr: &Expr) -> Option<&datafusion_expr::expr::AggregateFun
}

impl StateMergeHelper {
    /// Registers the `state` function of every supported aggregate function.
    /// Note that the `merge` function can't be registered here, as it needs to be created from the original aggregate function with given input types.
    pub fn register(registry: &FunctionRegistry) {
        let all_default = all_default_aggregate_functions();
        let greptime_custom_aggr_functions = registry.aggregate_functions();

        // If a custom aggregate function has the same name as a default aggregate function, it overrides the default.
        let supported = all_default
            .into_iter()
            .chain(greptime_custom_aggr_functions.into_iter().map(Arc::new))
            .collect::<Vec<_>>();
        debug!(
            "Registering state functions for supported: {:?}",
            supported.iter().map(|f| f.name()).collect::<Vec<_>>()
        );

        let state_func = supported.into_iter().filter_map(|f| {
            StateWrapper::new((*f).clone())
                .inspect_err(
                    |e| common_telemetry::error!(e; "Failed to register state function for {:?}", f),
                )
                .ok()
                .map(AggregateUDF::new_from_impl)
        });

        for func in state_func {
            registry.register_aggr(func);
        }
    }

    /// Split an aggregate plan into two aggregate plans, one for the state function and one for the merge function.
    pub fn split_aggr_node(aggr_plan: Aggregate) -> datafusion_common::Result<StepAggrPlan> {
        let aggr = {
@@ -166,18 +200,18 @@ impl StateMergeHelper {
        let lower_plan = LogicalPlan::Aggregate(lower);

        // update the aggregate's output schema
        let lower_plan = Arc::new(lower_plan.recompute_schema()?);
        let lower_plan = lower_plan.recompute_schema()?;

        let mut upper = aggr.clone();
        let aggr_plan = LogicalPlan::Aggregate(aggr);
        upper.aggr_expr = upper_aggr_exprs;
        upper.input = lower_plan.clone();
        upper.input = Arc::new(lower_plan.clone());
        // the upper plan's output schema should be the same as the original aggregate plan's output schema
        let upper_check = upper.clone();
        let upper_plan = Arc::new(LogicalPlan::Aggregate(upper_check).recompute_schema()?);
        let upper_check = upper;
        let upper_plan = LogicalPlan::Aggregate(upper_check).recompute_schema()?;
        if *upper_plan.schema() != *aggr_plan.schema() {
            return Err(datafusion_common::DataFusionError::Internal(format!(
                "Upper aggregate plan's schema is not the same as the original aggregate plan's schema: \n[transformed]:{}\n[ original]{}",
                "Upper aggregate plan's schema is not the same as the original aggregate plan's schema: \n[transformed]:{}\n[original]:{}",
                upper_plan.schema(), aggr_plan.schema()
            )));
        }
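A hedged usage sketch of the split, assuming an `Aggregate` node `aggr` and the original plan `original_plan` are at hand; with the `Arc` wrappers gone from `StepAggrPlan`, both halves are owned plans:

    let StepAggrPlan { lower_state, upper_merge } =
        StateMergeHelper::split_aggr_node(aggr)?;
    // The invariant enforced above: the merge half reproduces the
    // original aggregate's output schema.
    assert_eq!(upper_merge.schema(), original_plan.schema());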
@@ -407,15 +441,18 @@ impl AggregateUDFImpl for MergeWrapper {
        &'a self,
        acc_args: datafusion_expr::function::AccumulatorArgs<'b>,
    ) -> datafusion_common::Result<Box<dyn Accumulator>> {
        if acc_args.schema.fields().len() != 1
            || !matches!(acc_args.schema.field(0).data_type(), DataType::Struct(_))
        if acc_args.exprs.len() != 1
            || !matches!(
                acc_args.exprs[0].data_type(acc_args.schema)?,
                DataType::Struct(_)
            )
        {
            return Err(datafusion_common::DataFusionError::Internal(format!(
                "Expected one struct type as input, got: {:?}",
                acc_args.schema
            )));
        }
        let input_type = acc_args.schema.field(0).data_type();
        let input_type = acc_args.exprs[0].data_type(acc_args.schema)?;
        let DataType::Struct(fields) = input_type else {
            return Err(datafusion_common::DataFusionError::Internal(format!(
                "Expected a struct type for input, got: {:?}",
@@ -424,7 +461,7 @@ impl AggregateUDFImpl for MergeWrapper {
        };

        let inner_accum = self.original_phy_expr.create_accumulator()?;
        Ok(Box::new(MergeAccum::new(inner_accum, fields)))
        Ok(Box::new(MergeAccum::new(inner_accum, &fields)))
    }

    fn as_any(&self) -> &dyn std::any::Any {

@@ -258,7 +258,7 @@ async fn test_sum_udaf() {
    )
    .recompute_schema()
    .unwrap();
    assert_eq!(res.lower_state.as_ref(), &expected_lower_plan);
    assert_eq!(&res.lower_state, &expected_lower_plan);

    let expected_merge_plan = LogicalPlan::Aggregate(
        Aggregate::try_new(
@@ -297,7 +297,7 @@ async fn test_sum_udaf() {
        )
        .unwrap(),
    );
    assert_eq!(res.upper_merge.as_ref(), &expected_merge_plan);
    assert_eq!(&res.upper_merge, &expected_merge_plan);

    let phy_aggr_state_plan = DefaultPhysicalPlanner::default()
        .create_physical_plan(&res.lower_state, &ctx.state())
@@ -405,7 +405,7 @@ async fn test_avg_udaf() {
    let coerced_aggr_state_plan = TypeCoercion::new()
        .analyze(expected_aggr_state_plan.clone(), &Default::default())
        .unwrap();
    assert_eq!(res.lower_state.as_ref(), &coerced_aggr_state_plan);
    assert_eq!(&res.lower_state, &coerced_aggr_state_plan);
    assert_eq!(
        res.lower_state.schema().as_arrow(),
        &arrow_schema::Schema::new(vec![Field::new(
@@ -456,7 +456,7 @@ async fn test_avg_udaf() {
        )
        .unwrap(),
    );
    assert_eq!(res.upper_merge.as_ref(), &expected_merge_plan);
    assert_eq!(&res.upper_merge, &expected_merge_plan);

    let phy_aggr_state_plan = DefaultPhysicalPlanner::default()
        .create_physical_plan(&coerced_aggr_state_plan, &ctx.state())

@@ -20,6 +20,7 @@ use datafusion_expr::AggregateUDF;
use once_cell::sync::Lazy;

use crate::admin::AdminFunction;
use crate::aggrs::aggr_wrapper::StateMergeHelper;
use crate::aggrs::approximate::ApproximateFunction;
use crate::aggrs::count_hash::CountHash;
use crate::aggrs::vector::VectorFunction as VectorAggrFunction;
@@ -105,6 +106,10 @@ impl FunctionRegistry {
            .cloned()
            .collect()
    }

    pub fn is_aggr_func_exist(&self, name: &str) -> bool {
        self.aggregate_functions.read().unwrap().contains_key(name)
    }
}

pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
@@ -148,6 +153,9 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
    // CountHash function
    CountHash::register(&function_registry);

    // state function of supported aggregate functions
    StateMergeHelper::register(&function_registry);

    Arc::new(function_registry)
});

@@ -14,6 +14,7 @@

use std::sync::Arc;

use api::v1::meta::ReconcileRequest;
use async_trait::async_trait;
use catalog::CatalogManagerRef;
use common_base::AffectedRows;
@@ -65,6 +66,9 @@ pub trait ProcedureServiceHandler: Send + Sync {
    /// Migrate a region from source peer to target peer, returns the procedure id on success.
    async fn migrate_region(&self, request: MigrateRegionRequest) -> Result<Option<String>>;

    /// Reconcile a table, database or catalog, returns the procedure id on success.
    async fn reconcile(&self, request: ReconcileRequest) -> Result<Option<String>>;

    /// Query the procedure's state by its id
    async fn query_procedure_state(&self, pid: &str) -> Result<ProcedureStateResponse>;

@@ -12,12 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use common_query::error::{InvalidInputTypeSnafu, Result};
use api::v1::meta::ResolveStrategy;
use common_query::error::{
    InvalidFuncArgsSnafu, InvalidInputTypeSnafu, Result, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::types::cast::cast;
use datatypes::value::ValueRef;
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};

/// Create a function signature with oneof signatures of interleaving two arguments.
pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>) -> Signature {
@@ -43,3 +46,64 @@ pub fn cast_u64(value: &ValueRef) -> Result<Option<u64>> {
    })
    .map(|v| v.as_u64())
}

/// Cast a [`ValueRef`] to u32, returns `None` if it fails
pub fn cast_u32(value: &ValueRef) -> Result<Option<u32>> {
    cast((*value).into(), &ConcreteDataType::uint32_datatype())
        .context(InvalidInputTypeSnafu {
            err_msg: format!(
                "Failed to cast input into uint32, actual type: {:#?}",
                value.data_type(),
            ),
        })
        .map(|v| v.as_u64().map(|v| v as u32))
}

/// Parse a resolve strategy from a string.
pub fn parse_resolve_strategy(strategy: &str) -> Result<ResolveStrategy> {
    ResolveStrategy::from_str_name(strategy).context(InvalidFuncArgsSnafu {
        err_msg: format!("Invalid resolve strategy: {}", strategy),
    })
}

/// Default parallelism for reconcile operations.
pub fn default_parallelism() -> u32 {
    64
}

/// Default resolve strategy for reconcile operations.
pub fn default_resolve_strategy() -> ResolveStrategy {
    ResolveStrategy::UseLatest
}

/// Get the string value from the params.
///
/// # Errors
/// Returns an error if the input type is not a string.
pub fn get_string_from_params<'a>(
    params: &'a [ValueRef<'a>],
    index: usize,
    fn_name: &'a str,
) -> Result<&'a str> {
    let ValueRef::String(s) = &params[index] else {
        return UnsupportedInputDataTypeSnafu {
            function: fn_name,
            datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
        }
        .fail();
    };
    Ok(s)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_resolve_strategy() {
        assert_eq!(
            parse_resolve_strategy("UseLatest").unwrap(),
            ResolveStrategy::UseLatest
        );
    }
}
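A hedged companion test, not part of this diff: since `parse_resolve_strategy` is built on `InvalidFuncArgsSnafu`, an unknown strategy name should surface as an error.

    #[test]
    fn test_parse_resolve_strategy_invalid() {
        assert!(parse_resolve_strategy("NotAStrategy").is_err());
    }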

@@ -14,6 +14,7 @@

#![feature(let_chains)]
#![feature(try_blocks)]
#![feature(assert_matches)]

mod admin;
mod flush_flow;

@@ -32,7 +32,7 @@ impl FunctionState {
    pub fn mock() -> Self {
        use std::sync::Arc;

        use api::v1::meta::ProcedureStatus;
        use api::v1::meta::{ProcedureStatus, ReconcileRequest};
        use async_trait::async_trait;
        use catalog::CatalogManagerRef;
        use common_base::AffectedRows;
@@ -63,6 +63,10 @@ impl FunctionState {
                Ok(Some("test_pid".to_string()))
            }

            async fn reconcile(&self, _request: ReconcileRequest) -> Result<Option<String>> {
                Ok(Some("test_pid".to_string()))
            }

            async fn query_procedure_state(&self, _pid: &str) -> Result<ProcedureStateResponse> {
                Ok(ProcedureStateResponse {
                    status: ProcedureStatus::Done.into(),

@@ -19,8 +19,8 @@ use std::io::BufReader;
use std::path::PathBuf;

use error::{
    BuildTempPathSnafu, DumpProfileDataSnafu, OpenTempFileSnafu, ProfilingNotEnabledSnafu,
    ReadOptProfSnafu,
    ActivateProfSnafu, BuildTempPathSnafu, DeactivateProfSnafu, DumpProfileDataSnafu,
    OpenTempFileSnafu, ProfilingNotEnabledSnafu, ReadOptProfSnafu, ReadProfActiveSnafu,
};
use jemalloc_pprof_mappings::MAPPINGS;
use jemalloc_pprof_utils::{parse_jeheap, FlamegraphOptions, StackProfile};
@@ -31,6 +31,7 @@ use crate::error::{FlamegraphSnafu, ParseJeHeapSnafu, Result};

const PROF_DUMP: &[u8] = b"prof.dump\0";
const OPT_PROF: &[u8] = b"opt.prof\0";
const PROF_ACTIVE: &[u8] = b"prof.active\0";

pub async fn dump_profile() -> Result<Vec<u8>> {
    ensure!(is_prof_enabled()?, ProfilingNotEnabledSnafu);
@@ -93,6 +94,27 @@ pub async fn dump_flamegraph() -> Result<Vec<u8>> {
    let flamegraph = profile.to_flamegraph(&mut opts).context(FlamegraphSnafu)?;
    Ok(flamegraph)
}

pub fn activate_heap_profile() -> Result<()> {
    ensure!(is_prof_enabled()?, ProfilingNotEnabledSnafu);
    unsafe {
        tikv_jemalloc_ctl::raw::update(PROF_ACTIVE, true).context(ActivateProfSnafu)?;
    }
    Ok(())
}

pub fn deactivate_heap_profile() -> Result<()> {
    ensure!(is_prof_enabled()?, ProfilingNotEnabledSnafu);
    unsafe {
        tikv_jemalloc_ctl::raw::update(PROF_ACTIVE, false).context(DeactivateProfSnafu)?;
    }
    Ok(())
}

pub fn is_heap_profile_active() -> Result<bool> {
    unsafe { Ok(tikv_jemalloc_ctl::raw::read::<bool>(PROF_ACTIVE).context(ReadProfActiveSnafu)?) }
}
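A hedged usage sketch combining the three functions above: sample only around a suspect workload, then restore the previous state. The `profile_while` wrapper is illustrative, not part of this diff.

    pub fn profile_while<R>(run: impl FnOnce() -> R) -> Result<R> {
        let was_active = is_heap_profile_active()?;
        if !was_active {
            activate_heap_profile()?;
        }
        let out = run();
        if !was_active {
            deactivate_heap_profile()?;
        }
        Ok(out)
    }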
|
||||
|
||||
fn is_prof_enabled() -> Result<bool> {
|
||||
// safety: OPT_PROF variable, if present, is always a boolean value.
|
||||
Ok(unsafe { tikv_jemalloc_ctl::raw::read::<bool>(OPT_PROF).context(ReadOptProfSnafu)? })
|
||||
|
||||
@@ -53,6 +53,24 @@ pub enum Error {
|
||||
#[snafu(source)]
|
||||
error: tikv_jemalloc_ctl::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to activate heap profiling"))]
|
||||
ActivateProf {
|
||||
#[snafu(source)]
|
||||
error: tikv_jemalloc_ctl::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to deactivate heap profiling"))]
|
||||
DeactivateProf {
|
||||
#[snafu(source)]
|
||||
error: tikv_jemalloc_ctl::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read heap profiling status"))]
|
||||
ReadProfActive {
|
||||
#[snafu(source)]
|
||||
error: tikv_jemalloc_ctl::Error,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
@@ -63,6 +81,9 @@ impl ErrorExt for Error {
|
||||
Error::BuildTempPath { .. } => StatusCode::Internal,
|
||||
Error::OpenTempFile { .. } => StatusCode::StorageUnavailable,
|
||||
Error::DumpProfileData { .. } => StatusCode::StorageUnavailable,
|
||||
Error::ActivateProf { .. } => StatusCode::Internal,
|
||||
Error::DeactivateProf { .. } => StatusCode::Internal,
|
||||
Error::ReadProfActive { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -17,7 +17,10 @@ pub mod error;
|
||||
#[cfg(not(windows))]
|
||||
mod jemalloc;
|
||||
#[cfg(not(windows))]
|
||||
pub use jemalloc::{dump_flamegraph, dump_pprof, dump_profile};
|
||||
pub use jemalloc::{
|
||||
activate_heap_profile, deactivate_heap_profile, dump_flamegraph, dump_pprof, dump_profile,
|
||||
is_heap_profile_active,
|
||||
};
|
||||
|
||||
#[cfg(windows)]
|
||||
pub async fn dump_profile() -> error::Result<Vec<u8>> {
|
||||
@@ -33,3 +36,18 @@ pub async fn dump_pprof() -> error::Result<Vec<u8>> {
|
||||
pub async fn dump_flamegraph() -> error::Result<Vec<u8>> {
|
||||
error::ProfilingNotSupportedSnafu.fail()
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
pub fn activate_heap_profile() -> error::Result<()> {
|
||||
error::ProfilingNotSupportedSnafu.fail()
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
pub fn deactivate_heap_profile() -> error::Result<()> {
|
||||
error::ProfilingNotSupportedSnafu.fail()
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
pub fn is_heap_profile_active() -> error::Result<bool> {
|
||||
error::ProfilingNotSupportedSnafu.fail()
|
||||
}
|
||||
|
||||
@@ -6,7 +6,16 @@ license.workspace = true
|
||||
|
||||
[features]
|
||||
testing = []
|
||||
pg_kvbackend = ["dep:tokio-postgres", "dep:backon", "dep:deadpool-postgres", "dep:deadpool"]
|
||||
pg_kvbackend = [
|
||||
"dep:tokio-postgres",
|
||||
"dep:backon",
|
||||
"dep:deadpool-postgres",
|
||||
"dep:deadpool",
|
||||
"dep:tokio-postgres-rustls",
|
||||
"dep:rustls-pemfile",
|
||||
"dep:rustls-native-certs",
|
||||
"dep:rustls",
|
||||
]
|
||||
mysql_kvbackend = ["dep:sqlx", "dep:backon"]
|
||||
enterprise = []
|
||||
|
||||
@@ -32,6 +41,7 @@ common-procedure.workspace = true
|
||||
common-procedure-test.workspace = true
|
||||
common-query.workspace = true
|
||||
common-recordbatch.workspace = true
|
||||
common-runtime.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
common-wal.workspace = true
|
||||
@@ -57,6 +67,9 @@ prost.workspace = true
|
||||
rand.workspace = true
|
||||
regex.workspace = true
|
||||
rskafka.workspace = true
|
||||
rustls = { workspace = true, default-features = false, features = ["ring", "logging", "std", "tls12"], optional = true }
|
||||
rustls-native-certs = { version = "0.7", optional = true }
|
||||
rustls-pemfile = { version = "2.0", optional = true }
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_with.workspace = true
|
||||
@@ -68,6 +81,7 @@ strum.workspace = true
|
||||
table = { workspace = true, features = ["testing"] }
|
||||
tokio.workspace = true
|
||||
tokio-postgres = { workspace = true, optional = true }
|
||||
tokio-postgres-rustls = { version = "0.12", optional = true }
|
||||
tonic.workspace = true
|
||||
tracing.workspace = true
|
||||
typetag.workspace = true
|
||||
|
||||
@@ -15,25 +15,17 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::ProcedureDetailResponse;
|
||||
use common_telemetry::tracing_context::W3cTrace;
|
||||
use store_api::storage::{RegionId, RegionNumber, TableId};
|
||||
|
||||
use crate::cache_invalidator::CacheInvalidatorRef;
|
||||
use crate::ddl::flow_meta::FlowMetadataAllocatorRef;
|
||||
use crate::ddl::table_meta::TableMetadataAllocatorRef;
|
||||
use crate::error::{Result, UnsupportedSnafu};
|
||||
use crate::key::flow::FlowMetadataManagerRef;
|
||||
use crate::key::table_route::PhysicalTableRouteValue;
|
||||
use crate::key::TableMetadataManagerRef;
|
||||
use crate::node_manager::NodeManagerRef;
|
||||
use crate::region_keeper::MemoryRegionKeeperRef;
|
||||
use crate::region_registry::LeaderRegionRegistryRef;
|
||||
use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
|
||||
use crate::rpc::procedure::{
|
||||
AddRegionFollowerRequest, MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse,
|
||||
RemoveRegionFollowerRequest,
|
||||
};
|
||||
use crate::DatanodeId;
|
||||
|
||||
pub mod alter_database;
|
||||
@@ -59,64 +51,6 @@ pub(crate) mod tests;
|
||||
pub mod truncate_table;
|
||||
pub mod utils;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct ExecutorContext {
|
||||
pub tracing_context: Option<W3cTrace>,
|
||||
}
|
||||
|
||||
/// The procedure executor that accepts ddl, region migration task etc.
|
||||
#[async_trait::async_trait]
|
||||
pub trait ProcedureExecutor: Send + Sync {
|
||||
/// Submit a ddl task
|
||||
async fn submit_ddl_task(
|
||||
&self,
|
||||
ctx: &ExecutorContext,
|
||||
request: SubmitDdlTaskRequest,
|
||||
) -> Result<SubmitDdlTaskResponse>;
|
||||
|
||||
/// Add a region follower
|
||||
async fn add_region_follower(
|
||||
&self,
|
||||
_ctx: &ExecutorContext,
|
||||
_request: AddRegionFollowerRequest,
|
||||
) -> Result<()> {
|
||||
UnsupportedSnafu {
|
||||
operation: "add_region_follower",
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
|
||||
/// Remove a region follower
|
||||
async fn remove_region_follower(
|
||||
&self,
|
||||
_ctx: &ExecutorContext,
|
||||
_request: RemoveRegionFollowerRequest,
|
||||
) -> Result<()> {
|
||||
UnsupportedSnafu {
|
||||
operation: "remove_region_follower",
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
|
||||
/// Submit a region migration task
|
||||
async fn migrate_region(
|
||||
&self,
|
||||
ctx: &ExecutorContext,
|
||||
request: MigrateRegionRequest,
|
||||
) -> Result<MigrateRegionResponse>;
|
||||
|
||||
/// Query the procedure state by its id
|
||||
async fn query_procedure_state(
|
||||
&self,
|
||||
ctx: &ExecutorContext,
|
||||
pid: &str,
|
||||
) -> Result<ProcedureStateResponse>;
|
||||
|
||||
async fn list_procedures(&self, ctx: &ExecutorContext) -> Result<ProcedureDetailResponse>;
|
||||
}
|
||||
|
||||
pub type ProcedureExecutorRef = Arc<dyn ProcedureExecutor>;
|
||||
|
||||
/// Metadata allocated to a table.
|
||||
#[derive(Default)]
|
||||
pub struct TableMetadata {
|
||||
|
||||
@@ -12,8 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::helper::to_pb_time_ranges;
|
||||
use api::v1::region::{
|
||||
region_request, RegionRequest, RegionRequestHeader, TruncateRequest as PbTruncateRegionRequest,
|
||||
region_request, truncate_request, RegionRequest, RegionRequestHeader,
|
||||
TruncateRequest as PbTruncateRegionRequest,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use common_procedure::error::{FromJsonSnafu, ToJsonSnafu};
|
||||
@@ -33,7 +35,7 @@ use table::table_reference::TableReference;
|
||||
|
||||
use crate::ddl::utils::{add_peer_context_if_needed, map_to_procedure_error};
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::{Result, TableNotFoundSnafu};
|
||||
use crate::error::{ConvertTimeRangesSnafu, Result, TableNotFoundSnafu};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::DeserializedValueWithBytes;
|
||||
@@ -153,6 +155,15 @@ impl TruncateTableProcedure {
|
||||
datanode
|
||||
);
|
||||
|
||||
let time_ranges = &self.data.task.time_ranges;
|
||||
let kind = if time_ranges.is_empty() {
|
||||
truncate_request::Kind::All(api::v1::region::All {})
|
||||
} else {
|
||||
let pb_time_ranges =
|
||||
to_pb_time_ranges(time_ranges).context(ConvertTimeRangesSnafu)?;
|
||||
truncate_request::Kind::TimeRanges(pb_time_ranges)
|
||||
};
|
||||
|
||||
let request = RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
tracing_context: TracingContext::from_current_span().to_w3c(),
|
||||
@@ -160,6 +171,7 @@ impl TruncateTableProcedure {
|
||||
}),
|
||||
body: Some(region_request::Body::Truncate(PbTruncateRegionRequest {
|
||||
region_id: region_id.as_u64(),
|
||||
kind: Some(kind),
|
||||
})),
|
||||
};
|
||||
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::ProcedureDetailResponse;
|
||||
use common_procedure::{
|
||||
watcher, BoxedProcedureLoader, Output, ProcedureId, ProcedureManagerRef, ProcedureWithId,
|
||||
};
|
||||
@@ -37,16 +36,16 @@ use crate::ddl::drop_flow::DropFlowProcedure;
|
||||
use crate::ddl::drop_table::DropTableProcedure;
|
||||
use crate::ddl::drop_view::DropViewProcedure;
|
||||
use crate::ddl::truncate_table::TruncateTableProcedure;
|
||||
use crate::ddl::{utils, DdlContext, ExecutorContext, ProcedureExecutor};
|
||||
use crate::ddl::{utils, DdlContext};
|
||||
use crate::error::{
|
||||
EmptyDdlTasksSnafu, ParseProcedureIdSnafu, ProcedureNotFoundSnafu, ProcedureOutputSnafu,
|
||||
QueryProcedureSnafu, RegisterProcedureLoaderSnafu, Result, SubmitProcedureSnafu,
|
||||
TableInfoNotFoundSnafu, TableNotFoundSnafu, TableRouteNotFoundSnafu,
|
||||
UnexpectedLogicalRouteTableSnafu, UnsupportedSnafu, WaitProcedureSnafu,
|
||||
EmptyDdlTasksSnafu, ProcedureOutputSnafu, RegisterProcedureLoaderSnafu, Result,
|
||||
SubmitProcedureSnafu, TableInfoNotFoundSnafu, TableNotFoundSnafu, TableRouteNotFoundSnafu,
|
||||
UnexpectedLogicalRouteTableSnafu, WaitProcedureSnafu,
|
||||
};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
|
||||
use crate::procedure_executor::ExecutorContext;
|
||||
#[cfg(feature = "enterprise")]
|
||||
use crate::rpc::ddl::trigger::CreateTriggerTask;
|
||||
#[cfg(feature = "enterprise")]
|
||||
@@ -65,8 +64,6 @@ use crate::rpc::ddl::{
|
||||
CreateViewTask, DropDatabaseTask, DropFlowTask, DropTableTask, DropViewTask, QueryContext,
|
||||
SubmitDdlTaskRequest, SubmitDdlTaskResponse, TruncateTableTask,
|
||||
};
|
||||
use crate::rpc::procedure;
|
||||
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
|
||||
use crate::rpc::router::RegionRoute;
|
||||
|
||||
pub type DdlManagerRef = Arc<DdlManager>;
|
||||
@@ -418,6 +415,75 @@ impl DdlManager {
|
||||
|
||||
Ok((procedure_id, output))
|
||||
}
|
||||
|
||||
pub async fn submit_ddl_task(
|
||||
&self,
|
||||
ctx: &ExecutorContext,
|
||||
request: SubmitDdlTaskRequest,
|
||||
) -> Result<SubmitDdlTaskResponse> {
|
||||
let span = ctx
|
||||
.tracing_context
|
||||
.as_ref()
|
||||
.map(TracingContext::from_w3c)
|
||||
.unwrap_or_else(TracingContext::from_current_span)
|
||||
.attach(tracing::info_span!("DdlManager::submit_ddl_task"));
|
||||
async move {
|
||||
debug!("Submitting Ddl task: {:?}", request.task);
|
||||
match request.task {
|
||||
CreateTable(create_table_task) => {
|
||||
handle_create_table_task(self, create_table_task).await
|
||||
}
|
||||
DropTable(drop_table_task) => handle_drop_table_task(self, drop_table_task).await,
|
||||
AlterTable(alter_table_task) => {
|
||||
handle_alter_table_task(self, alter_table_task).await
|
||||
}
|
||||
TruncateTable(truncate_table_task) => {
|
||||
handle_truncate_table_task(self, truncate_table_task).await
|
||||
}
|
||||
CreateLogicalTables(create_table_tasks) => {
|
||||
handle_create_logical_table_tasks(self, create_table_tasks).await
|
||||
}
|
||||
AlterLogicalTables(alter_table_tasks) => {
|
||||
handle_alter_logical_table_tasks(self, alter_table_tasks).await
|
||||
}
|
||||
DropLogicalTables(_) => todo!(),
|
||||
CreateDatabase(create_database_task) => {
|
||||
handle_create_database_task(self, create_database_task).await
|
||||
}
|
||||
DropDatabase(drop_database_task) => {
|
||||
handle_drop_database_task(self, drop_database_task).await
|
||||
}
|
||||
AlterDatabase(alter_database_task) => {
|
||||
handle_alter_database_task(self, alter_database_task).await
|
||||
}
|
||||
CreateFlow(create_flow_task) => {
|
||||
handle_create_flow_task(self, create_flow_task, request.query_context.into())
|
||||
.await
|
||||
}
|
||||
DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await,
|
||||
CreateView(create_view_task) => {
|
||||
handle_create_view_task(self, create_view_task).await
|
||||
}
|
||||
DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
|
||||
#[cfg(feature = "enterprise")]
|
||||
CreateTrigger(create_trigger_task) => {
|
||||
handle_create_trigger_task(
|
||||
self,
|
||||
create_trigger_task,
|
||||
request.query_context.into(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
#[cfg(feature = "enterprise")]
|
||||
DropTrigger(drop_trigger_task) => {
|
||||
handle_drop_trigger_task(self, drop_trigger_task, request.query_context.into())
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
.trace(span)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_truncate_table_task(
|
||||
@@ -667,6 +733,8 @@ async fn handle_drop_trigger_task(
|
||||
query_context: QueryContext,
|
||||
) -> Result<SubmitDdlTaskResponse> {
|
||||
let Some(m) = ddl_manager.trigger_ddl_manager.as_ref() else {
|
||||
use crate::error::UnsupportedSnafu;
|
||||
|
||||
return UnsupportedSnafu {
|
||||
operation: "drop trigger",
|
||||
}
|
||||
@@ -746,6 +814,8 @@ async fn handle_create_trigger_task(
|
||||
query_context: QueryContext,
|
||||
) -> Result<SubmitDdlTaskResponse> {
|
||||
let Some(m) = ddl_manager.trigger_ddl_manager.as_ref() else {
|
||||
use crate::error::UnsupportedSnafu;
|
||||
|
||||
return UnsupportedSnafu {
|
||||
operation: "create trigger",
|
||||
}
|
||||
@@ -822,119 +892,6 @@ async fn handle_create_view_task(
    })
}

-/// TODO(dennis): let [`DdlManager`] implement [`ProcedureExecutor`] looks weird, find some way to refactor it.
-#[async_trait::async_trait]
-impl ProcedureExecutor for DdlManager {
-    async fn submit_ddl_task(
-        &self,
-        ctx: &ExecutorContext,
-        request: SubmitDdlTaskRequest,
-    ) -> Result<SubmitDdlTaskResponse> {
-        let span = ctx
-            .tracing_context
-            .as_ref()
-            .map(TracingContext::from_w3c)
-            .unwrap_or(TracingContext::from_current_span())
-            .attach(tracing::info_span!("DdlManager::submit_ddl_task"));
-        async move {
-            debug!("Submitting Ddl task: {:?}", request.task);
-            match request.task {
-                CreateTable(create_table_task) => {
-                    handle_create_table_task(self, create_table_task).await
-                }
-                DropTable(drop_table_task) => handle_drop_table_task(self, drop_table_task).await,
-                AlterTable(alter_table_task) => {
-                    handle_alter_table_task(self, alter_table_task).await
-                }
-                TruncateTable(truncate_table_task) => {
-                    handle_truncate_table_task(self, truncate_table_task).await
-                }
-                CreateLogicalTables(create_table_tasks) => {
-                    handle_create_logical_table_tasks(self, create_table_tasks).await
-                }
-                AlterLogicalTables(alter_table_tasks) => {
-                    handle_alter_logical_table_tasks(self, alter_table_tasks).await
-                }
-                DropLogicalTables(_) => todo!(),
-                CreateDatabase(create_database_task) => {
-                    handle_create_database_task(self, create_database_task).await
-                }
-                DropDatabase(drop_database_task) => {
-                    handle_drop_database_task(self, drop_database_task).await
-                }
-                AlterDatabase(alter_database_task) => {
-                    handle_alter_database_task(self, alter_database_task).await
-                }
-                CreateFlow(create_flow_task) => {
-                    handle_create_flow_task(self, create_flow_task, request.query_context.into())
-                        .await
-                }
-                DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await,
-                CreateView(create_view_task) => {
-                    handle_create_view_task(self, create_view_task).await
-                }
-                DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
-                #[cfg(feature = "enterprise")]
-                CreateTrigger(create_trigger_task) => {
-                    handle_create_trigger_task(
-                        self,
-                        create_trigger_task,
-                        request.query_context.into(),
-                    )
-                    .await
-                }
-                #[cfg(feature = "enterprise")]
-                DropTrigger(drop_trigger_task) => {
-                    handle_drop_trigger_task(self, drop_trigger_task, request.query_context.into())
-                        .await
-                }
-            }
-        }
-        .trace(span)
-        .await
-    }
-
-    async fn migrate_region(
-        &self,
-        _ctx: &ExecutorContext,
-        _request: MigrateRegionRequest,
-    ) -> Result<MigrateRegionResponse> {
-        UnsupportedSnafu {
-            operation: "migrate_region",
-        }
-        .fail()
-    }
-
-    async fn query_procedure_state(
-        &self,
-        _ctx: &ExecutorContext,
-        pid: &str,
-    ) -> Result<ProcedureStateResponse> {
-        let pid =
-            ProcedureId::parse_str(pid).with_context(|_| ParseProcedureIdSnafu { key: pid })?;
-
-        let state = self
-            .procedure_manager
-            .procedure_state(pid)
-            .await
-            .context(QueryProcedureSnafu)?
-            .context(ProcedureNotFoundSnafu {
-                pid: pid.to_string(),
-            })?;
-
-        Ok(procedure::procedure_state_to_pb_response(&state))
-    }
-
-    async fn list_procedures(&self, _ctx: &ExecutorContext) -> Result<ProcedureDetailResponse> {
-        let metas = self
-            .procedure_manager
-            .list_procedures()
-            .await
-            .context(QueryProcedureSnafu)?;
-        Ok(procedure::procedure_details_to_pb_response(metas))
-    }
-}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

@@ -996,6 +953,7 @@ mod tests {
            state_store,
            poison_manager,
            None,
            None,
        ));

        let _ = DdlManager::try_new(
@@ -403,6 +403,13 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Catalog not found, catalog: {}", catalog))]
    CatalogNotFound {
        catalog: String,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Invalid metadata, err: {}", err_msg))]
    InvalidMetadata {
        err_msg: String,

@@ -733,6 +740,32 @@ pub enum Error {
        operation: String,
    },

    #[cfg(feature = "pg_kvbackend")]
    #[snafu(display("Failed to setup PostgreSQL TLS configuration: {}", reason))]
    PostgresTlsConfig {
        reason: String,
        #[snafu(implicit)]
        location: Location,
    },

    #[cfg(feature = "pg_kvbackend")]
    #[snafu(display("Failed to load TLS certificate from path: {}", path))]
    LoadTlsCertificate {
        path: String,
        #[snafu(source)]
        error: std::io::Error,
        #[snafu(implicit)]
        location: Location,
    },

    #[cfg(feature = "pg_kvbackend")]
    #[snafu(display("Invalid TLS configuration: {}", reason))]
    InvalidTlsConfig {
        reason: String,
        #[snafu(implicit)]
        location: Location,
    },

    #[cfg(feature = "mysql_kvbackend")]
    #[snafu(display("Failed to execute via MySql, sql: {}", sql))]
    MySqlExecution {

@@ -938,6 +971,13 @@ pub enum Error {
        source: api::error::Error,
    },

    #[snafu(display("Failed to convert time ranges"))]
    ConvertTimeRanges {
        #[snafu(implicit)]
        location: Location,
        source: api::error::Error,
    },

    #[snafu(display(
        "Column metadata inconsistencies found in table: {}, table_id: {}",
        table_name,

@@ -1012,7 +1052,8 @@ impl ErrorExt for Error {
            | KafkaGetOffset { .. }
            | ReadFlexbuffers { .. }
            | SerializeFlexbuffers { .. }
-            | DeserializeFlexbuffers { .. } => StatusCode::Unexpected,
            | DeserializeFlexbuffers { .. }
            | ConvertTimeRanges { .. } => StatusCode::Unexpected,

            SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,

@@ -1062,6 +1103,7 @@ impl ErrorExt for Error {
            ParseProcedureId { .. }
            | InvalidNumTopics { .. }
            | SchemaNotFound { .. }
            | CatalogNotFound { .. }
            | InvalidNodeInfoKey { .. }
            | InvalidStatKey { .. }
            | ParseNum { .. }

@@ -1072,7 +1114,10 @@ impl ErrorExt for Error {
            PostgresExecution { .. }
            | CreatePostgresPool { .. }
            | GetPostgresConnection { .. }
-            | PostgresTransaction { .. } => StatusCode::Internal,
            | PostgresTransaction { .. }
            | PostgresTlsConfig { .. }
            | LoadTlsCertificate { .. }
            | InvalidTlsConfig { .. } => StatusCode::Internal,
            #[cfg(feature = "mysql_kvbackend")]
            MySqlExecution { .. } | CreateMySqlPool { .. } | MySqlTransaction { .. } => {
                StatusCode::Internal
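Each new variant follows the crate's convention: a `display` message, an implicitly captured `Location`, and (in the `ErrorExt` hunks above) a mapping to a `StatusCode`. A reduced sketch of how the `#[snafu(implicit)]` location works, reusing one of the variants added here in isolation:

    use snafu::{Location, Snafu};

    #[derive(Debug, Snafu)]
    enum Error {
        #[snafu(display("Catalog not found, catalog: {}", catalog))]
        CatalogNotFound {
            catalog: String,
            #[snafu(implicit)]
            location: Location,
        },
    }

    // The `location` field is filled in automatically at the selector's
    // `.fail()` / `.build()` call site; callers only supply `catalog`.
    fn lookup(catalog: &str) -> Result<(), Error> {
        CatalogNotFoundSnafu { catalog }.fail()
    }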
@@ -40,7 +40,7 @@ const RDS_STORE_OP_RANGE_DELETE: &str = "range_delete";
const RDS_STORE_OP_BATCH_DELETE: &str = "batch_delete";

#[cfg(feature = "pg_kvbackend")]
-mod postgres;
pub mod postgres;
#[cfg(feature = "pg_kvbackend")]
pub use postgres::PgStore;

@@ -118,7 +118,7 @@ impl<T: Executor> ExecutorImpl<'_, T> {
        }
    }

-    #[warn(dead_code)] // Used in #[cfg(feature = "mysql_kvbackend")]
    #[allow(dead_code)] // Used in #[cfg(feature = "mysql_kvbackend")]
    async fn execute(&mut self, query: &str, params: &Vec<&Vec<u8>>) -> Result<()> {
        match self {
            Self::Default(executor) => executor.execute(query, params).await,
@@ -12,19 +12,29 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fs::File;
use std::io::BufReader;
use std::marker::PhantomData;
use std::sync::Arc;

use common_telemetry::debug;
use deadpool_postgres::{Config, Pool, Runtime};
use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier};
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
use rustls::server::ParsedCertificate;
// TLS-related imports (feature-gated)
use rustls::ClientConfig;
use rustls::{DigitallySignedStruct, Error as TlsError, SignatureScheme};
use rustls_pemfile::{certs, private_key};
use snafu::ResultExt;
use strum::AsRefStr;
use tokio_postgres::types::ToSql;
use tokio_postgres::{IsolationLevel, NoTls, Row};
use tokio_postgres_rustls::MakeRustlsConnect;

use crate::error::{
-    CreatePostgresPoolSnafu, GetPostgresConnectionSnafu, PostgresExecutionSnafu,
-    PostgresTransactionSnafu, Result,
    CreatePostgresPoolSnafu, GetPostgresConnectionSnafu, LoadTlsCertificateSnafu,
    PostgresExecutionSnafu, PostgresTlsConfigSnafu, PostgresTransactionSnafu, Result,
};
use crate::kv_backend::rds::{
    Executor, ExecutorFactory, ExecutorImpl, KvQueryExecutor, RdsStore, Transaction,

@@ -38,6 +48,41 @@ use crate::rpc::store::{
};
use crate::rpc::KeyValue;

/// TLS mode configuration for PostgreSQL connections.
/// This mirrors the TlsMode from servers::tls to avoid circular dependencies.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub enum TlsMode {
    Disable,
    #[default]
    Prefer,
    Require,
    VerifyCa,
    VerifyFull,
}

/// TLS configuration for PostgreSQL connections.
/// This mirrors the TlsOption from servers::tls to avoid circular dependencies.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TlsOption {
    pub mode: TlsMode,
    pub cert_path: String,
    pub key_path: String,
    pub ca_cert_path: String,
    pub watch: bool,
}

impl Default for TlsOption {
    fn default() -> Self {
        TlsOption {
            mode: TlsMode::Prefer,
            cert_path: String::new(),
            key_path: String::new(),
            ca_cert_path: String::new(),
            watch: false,
        }
    }
}
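As a usage sketch, a verify-full configuration built from the types above would look like this (the paths are placeholders, not defaults shipped anywhere):

    let tls = TlsOption {
        mode: TlsMode::VerifyFull,
        // Hypothetical certificate locations.
        cert_path: "/etc/greptimedb/client.crt".to_string(),
        key_path: "/etc/greptimedb/client.key".to_string(),
        ca_cert_path: "/etc/greptimedb/ca.crt".to_string(),
        watch: false,
    };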
const PG_STORE_NAME: &str = "pg_store";

pub struct PgClient(deadpool::managed::Object<deadpool_postgres::Manager>);
@@ -348,6 +393,265 @@ impl ExecutorFactory<PgClient> for PgExecutorFactory {
/// It uses [deadpool_postgres::Pool] as the connection pool for [RdsStore].
pub type PgStore = RdsStore<PgClient, PgExecutorFactory, PgSqlTemplateSet>;

/// Creates a PostgreSQL TLS connector based on the provided configuration.
///
/// This function creates a rustls-based TLS connector for PostgreSQL connections,
/// following PostgreSQL's TLS mode specifications exactly:
///
/// # TLS Modes (PostgreSQL Specification)
///
/// - `Disable`: No TLS connection attempted
/// - `Prefer`: Try TLS first, fall back to plaintext if TLS fails (handled by connection logic)
/// - `Require`: Only TLS connections, but NO certificate verification (accept any cert)
/// - `VerifyCa`: TLS + verify certificate is signed by trusted CA (no hostname verification)
/// - `VerifyFull`: TLS + verify CA + verify hostname matches certificate SAN
///
pub fn create_postgres_tls_connector(tls_config: &TlsOption) -> Result<MakeRustlsConnect> {
    common_telemetry::info!(
        "Creating PostgreSQL TLS connector with mode: {:?}",
        tls_config.mode
    );

    let config_builder = match tls_config.mode {
        TlsMode::Disable => {
            return PostgresTlsConfigSnafu {
                reason: "Cannot create TLS connector for Disable mode".to_string(),
            }
            .fail();
        }
        TlsMode::Prefer | TlsMode::Require => {
            // For Prefer/Require: accept any certificate (no verification)
            let verifier = Arc::new(AcceptAnyVerifier);
            ClientConfig::builder()
                .dangerous()
                .with_custom_certificate_verifier(verifier)
        }
        TlsMode::VerifyCa => {
            // For VerifyCa: verify the server cert against the CA store, but skip hostname verification
            let ca_store = load_ca(&tls_config.ca_cert_path)?;
            let verifier = Arc::new(NoHostnameVerification { roots: ca_store });
            ClientConfig::builder()
                .dangerous()
                .with_custom_certificate_verifier(verifier)
        }
        TlsMode::VerifyFull => {
            let ca_store = load_ca(&tls_config.ca_cert_path)?;
            ClientConfig::builder().with_root_certificates(ca_store)
        }
    };

    // Create the TLS client configuration based on the mode and client cert requirements
    let client_config = if !tls_config.cert_path.is_empty() && !tls_config.key_path.is_empty() {
        // Client certificate authentication required
        common_telemetry::info!("Loading client certificate for mutual TLS");
        let cert_chain = load_certs(&tls_config.cert_path)?;
        let private_key = load_private_key(&tls_config.key_path)?;

        config_builder
            .with_client_auth_cert(cert_chain, private_key)
            .map_err(|e| {
                PostgresTlsConfigSnafu {
                    reason: format!("Failed to configure client authentication: {}", e),
                }
                .build()
            })?
    } else {
        common_telemetry::info!("No client certificate provided, skip client authentication");
        config_builder.with_no_client_auth()
    };

    common_telemetry::info!("Successfully created PostgreSQL TLS connector");
    Ok(MakeRustlsConnect::new(client_config))
}

/// For Prefer/Require mode, we accept any server certificate without verification.
#[derive(Debug)]
struct AcceptAnyVerifier;

impl ServerCertVerifier for AcceptAnyVerifier {
    fn verify_server_cert(
        &self,
        _end_entity: &CertificateDer<'_>,
        _intermediates: &[CertificateDer<'_>],
        _server_name: &ServerName<'_>,
        _ocsp_response: &[u8],
        _now: UnixTime,
    ) -> std::result::Result<ServerCertVerified, TlsError> {
        common_telemetry::debug!(
            "Accepting server certificate without verification (Prefer/Require mode)"
        );
        Ok(ServerCertVerified::assertion())
    }

    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &DigitallySignedStruct,
    ) -> std::result::Result<HandshakeSignatureValid, TlsError> {
        // Accept any signature without verification
        Ok(HandshakeSignatureValid::assertion())
    }

    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &DigitallySignedStruct,
    ) -> std::result::Result<HandshakeSignatureValid, TlsError> {
        // Accept any signature without verification
        Ok(HandshakeSignatureValid::assertion())
    }

    fn supported_verify_schemes(&self) -> Vec<SignatureScheme> {
        // Support all signature schemes
        rustls::crypto::ring::default_provider()
            .signature_verification_algorithms
            .supported_schemes()
    }
}

/// For VerifyCa mode, we verify the server certificate against our CA store
/// and skip verifying the server's hostname.
#[derive(Debug)]
struct NoHostnameVerification {
    roots: Arc<rustls::RootCertStore>,
}

impl ServerCertVerifier for NoHostnameVerification {
    fn verify_server_cert(
        &self,
        end_entity: &CertificateDer<'_>,
        intermediates: &[CertificateDer<'_>],
        _server_name: &ServerName<'_>,
        _ocsp_response: &[u8],
        now: UnixTime,
    ) -> std::result::Result<ServerCertVerified, TlsError> {
        let cert = ParsedCertificate::try_from(end_entity)?;
        rustls::client::verify_server_cert_signed_by_trust_anchor(
            &cert,
            &self.roots,
            intermediates,
            now,
            rustls::crypto::ring::default_provider()
                .signature_verification_algorithms
                .all,
        )?;

        Ok(ServerCertVerified::assertion())
    }

    fn verify_tls12_signature(
        &self,
        message: &[u8],
        cert: &CertificateDer<'_>,
        dss: &DigitallySignedStruct,
    ) -> std::result::Result<HandshakeSignatureValid, TlsError> {
        rustls::crypto::verify_tls12_signature(
            message,
            cert,
            dss,
            &rustls::crypto::ring::default_provider().signature_verification_algorithms,
        )
    }

    fn verify_tls13_signature(
        &self,
        message: &[u8],
        cert: &CertificateDer<'_>,
        dss: &DigitallySignedStruct,
    ) -> std::result::Result<HandshakeSignatureValid, TlsError> {
        rustls::crypto::verify_tls13_signature(
            message,
            cert,
            dss,
            &rustls::crypto::ring::default_provider().signature_verification_algorithms,
        )
    }

    fn supported_verify_schemes(&self) -> Vec<SignatureScheme> {
        // Support all signature schemes
        rustls::crypto::ring::default_provider()
            .signature_verification_algorithms
            .supported_schemes()
    }
}

fn load_certs(path: &str) -> Result<Vec<rustls::pki_types::CertificateDer<'static>>> {
    let file = File::open(path).context(LoadTlsCertificateSnafu { path })?;
    let mut reader = BufReader::new(file);
    let certs = certs(&mut reader)
        .collect::<std::result::Result<Vec<_>, _>>()
        .map_err(|e| {
            PostgresTlsConfigSnafu {
                reason: format!("Failed to parse certificates from {}: {}", path, e),
            }
            .build()
        })?;
    Ok(certs)
}

fn load_private_key(path: &str) -> Result<rustls::pki_types::PrivateKeyDer<'static>> {
    let file = File::open(path).context(LoadTlsCertificateSnafu { path })?;
    let mut reader = BufReader::new(file);
    let key = private_key(&mut reader)
        .map_err(|e| {
            PostgresTlsConfigSnafu {
                reason: format!("Failed to parse private key from {}: {}", path, e),
            }
            .build()
        })?
        .ok_or_else(|| {
            PostgresTlsConfigSnafu {
                reason: format!("No private key found in {}", path),
            }
            .build()
        })?;
    Ok(key)
}

fn load_ca(path: &str) -> Result<Arc<rustls::RootCertStore>> {
    let mut root_store = rustls::RootCertStore::empty();

    // Add system root certificates
    match rustls_native_certs::load_native_certs() {
        Ok(certs) => {
            let num_certs = certs.len();
            for cert in certs {
                if let Err(e) = root_store.add(cert) {
                    return PostgresTlsConfigSnafu {
                        reason: format!("Failed to add root certificate: {}", e),
                    }
                    .fail();
                }
            }
            common_telemetry::info!("Loaded {num_certs} system root certificates successfully");
        }
        Err(e) => {
            return PostgresTlsConfigSnafu {
                reason: format!("Failed to load system root certificates: {}", e),
            }
            .fail();
        }
    }

    // Try to add a custom CA certificate if provided
    if !path.is_empty() {
        let ca_certs = load_certs(path)?;
        for cert in ca_certs {
            if let Err(e) = root_store.add(cert) {
                return PostgresTlsConfigSnafu {
                    reason: format!("Failed to add custom CA certificate: {}", e),
                }
                .fail();
            }
        }
        common_telemetry::info!("Added custom CA certificate from {}", path);
    }

    Ok(Arc::new(root_store))
}
#[async_trait::async_trait]
impl KvQueryExecutor<PgClient> for PgStore {
    async fn range_with_query_executor(

@@ -491,17 +795,54 @@ impl KvQueryExecutor<PgClient> for PgStore {
}

impl PgStore {
-    /// Create [PgStore] impl of [KvBackendRef] from url.
-    pub async fn with_url(url: &str, table_name: &str, max_txn_ops: usize) -> Result<KvBackendRef> {
    /// Create [PgStore] impl of [KvBackendRef] from url with optional TLS support.
    ///
    /// # Arguments
    ///
    /// * `url` - PostgreSQL connection URL
    /// * `table_name` - Name of the table to use for key-value storage
    /// * `max_txn_ops` - Maximum number of operations per transaction
    /// * `tls_config` - Optional TLS configuration. If None, uses a plaintext connection.
    pub async fn with_url_and_tls(
        url: &str,
        table_name: &str,
        max_txn_ops: usize,
        tls_config: Option<TlsOption>,
    ) -> Result<KvBackendRef> {
        let mut cfg = Config::new();
        cfg.url = Some(url.to_string());
-        // TODO(weny, CookiePie): add tls support
-        let pool = cfg
-            .create_pool(Some(Runtime::Tokio1), NoTls)
-            .context(CreatePostgresPoolSnafu)?;
        let pool = match tls_config {
            Some(tls_config) if tls_config.mode != TlsMode::Disable => {
                match create_postgres_tls_connector(&tls_config) {
                    Ok(tls_connector) => cfg
                        .create_pool(Some(Runtime::Tokio1), tls_connector)
                        .context(CreatePostgresPoolSnafu)?,
                    Err(e) => {
                        if tls_config.mode == TlsMode::Prefer {
                            // Fall back to an insecure connection if TLS fails
                            common_telemetry::info!("Failed to create TLS connector, falling back to insecure connection");
                            cfg.create_pool(Some(Runtime::Tokio1), NoTls)
                                .context(CreatePostgresPoolSnafu)?
                        } else {
                            return Err(e);
                        }
                    }
                }
            }
            _ => cfg
                .create_pool(Some(Runtime::Tokio1), NoTls)
                .context(CreatePostgresPoolSnafu)?,
        };

        Self::with_pg_pool(pool, table_name, max_txn_ops).await
    }

    /// Create [PgStore] impl of [KvBackendRef] from url (backward compatibility).
    pub async fn with_url(url: &str, table_name: &str, max_txn_ops: usize) -> Result<KvBackendRef> {
        Self::with_url_and_tls(url, table_name, max_txn_ops, None).await
    }

    /// Create [PgStore] impl of [KvBackendRef] from [deadpool_postgres::Pool].
    pub async fn with_pg_pool(
        pool: Pool,
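Putting the pieces together, a caller that wants an encrypted metadata backend might do something like the following sketch (the DSN and table name are invented; per the doc comment above, `Require` encrypts the connection but performs no certificate verification):

    let tls = TlsOption {
        mode: TlsMode::Require,
        ..Default::default()
    };
    let kv_backend = PgStore::with_url_and_tls(
        "postgres://user:pass@127.0.0.1:5432/meta", // hypothetical DSN
        "greptime_metakv",                          // hypothetical table name
        128,
        Some(tls),
    )
    .await?;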
@@ -37,6 +37,7 @@ pub mod node_expiry_listener;
pub mod node_manager;
pub mod peer;
pub mod poison_key;
pub mod procedure_executor;
pub mod range_stream;
pub mod reconciliation;
pub mod region_keeper;
src/common/meta/src/procedure_executor.rs (new file, 173 lines)
@@ -0,0 +1,173 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use api::v1::meta::{ProcedureDetailResponse, ReconcileRequest, ReconcileResponse};
use common_procedure::{ProcedureId, ProcedureManagerRef};
use common_telemetry::tracing_context::W3cTrace;
use snafu::{OptionExt, ResultExt};

use crate::ddl_manager::DdlManagerRef;
use crate::error::{
    ParseProcedureIdSnafu, ProcedureNotFoundSnafu, QueryProcedureSnafu, Result, UnsupportedSnafu,
};
use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use crate::rpc::procedure::{
    self, AddRegionFollowerRequest, MigrateRegionRequest, MigrateRegionResponse,
    ProcedureStateResponse, RemoveRegionFollowerRequest,
};

/// The context of procedure executor.
#[derive(Debug, Default)]
pub struct ExecutorContext {
    pub tracing_context: Option<W3cTrace>,
}

/// The procedure executor that accepts ddl, region migration task etc.
#[async_trait::async_trait]
pub trait ProcedureExecutor: Send + Sync {
    /// Submit a ddl task
    async fn submit_ddl_task(
        &self,
        ctx: &ExecutorContext,
        request: SubmitDdlTaskRequest,
    ) -> Result<SubmitDdlTaskResponse>;

    /// Add a region follower
    async fn add_region_follower(
        &self,
        _ctx: &ExecutorContext,
        _request: AddRegionFollowerRequest,
    ) -> Result<()> {
        UnsupportedSnafu {
            operation: "add_region_follower",
        }
        .fail()
    }

    /// Remove a region follower
    async fn remove_region_follower(
        &self,
        _ctx: &ExecutorContext,
        _request: RemoveRegionFollowerRequest,
    ) -> Result<()> {
        UnsupportedSnafu {
            operation: "remove_region_follower",
        }
        .fail()
    }

    /// Submit a region migration task
    async fn migrate_region(
        &self,
        ctx: &ExecutorContext,
        request: MigrateRegionRequest,
    ) -> Result<MigrateRegionResponse>;

    /// Submit a reconcile task.
    async fn reconcile(
        &self,
        _ctx: &ExecutorContext,
        request: ReconcileRequest,
    ) -> Result<ReconcileResponse>;

    /// Query the procedure state by its id
    async fn query_procedure_state(
        &self,
        ctx: &ExecutorContext,
        pid: &str,
    ) -> Result<ProcedureStateResponse>;

    async fn list_procedures(&self, ctx: &ExecutorContext) -> Result<ProcedureDetailResponse>;
}

pub type ProcedureExecutorRef = Arc<dyn ProcedureExecutor>;

/// The local procedure executor that accepts ddl, region migration task etc.
pub struct LocalProcedureExecutor {
    pub ddl_manager: DdlManagerRef,
    pub procedure_manager: ProcedureManagerRef,
}

impl LocalProcedureExecutor {
    pub fn new(ddl_manager: DdlManagerRef, procedure_manager: ProcedureManagerRef) -> Self {
        Self {
            ddl_manager,
            procedure_manager,
        }
    }
}

#[async_trait::async_trait]
impl ProcedureExecutor for LocalProcedureExecutor {
    async fn submit_ddl_task(
        &self,
        ctx: &ExecutorContext,
        request: SubmitDdlTaskRequest,
    ) -> Result<SubmitDdlTaskResponse> {
        self.ddl_manager.submit_ddl_task(ctx, request).await
    }

    async fn migrate_region(
        &self,
        _ctx: &ExecutorContext,
        _request: MigrateRegionRequest,
    ) -> Result<MigrateRegionResponse> {
        UnsupportedSnafu {
            operation: "migrate_region",
        }
        .fail()
    }

    async fn reconcile(
        &self,
        _ctx: &ExecutorContext,
        _request: ReconcileRequest,
    ) -> Result<ReconcileResponse> {
        UnsupportedSnafu {
            operation: "reconcile",
        }
        .fail()
    }

    async fn query_procedure_state(
        &self,
        _ctx: &ExecutorContext,
        pid: &str,
    ) -> Result<ProcedureStateResponse> {
        let pid =
            ProcedureId::parse_str(pid).with_context(|_| ParseProcedureIdSnafu { key: pid })?;

        let state = self
            .procedure_manager
            .procedure_state(pid)
            .await
            .context(QueryProcedureSnafu)?
            .with_context(|| ProcedureNotFoundSnafu {
                pid: pid.to_string(),
            })?;

        Ok(procedure::procedure_state_to_pb_response(&state))
    }

    async fn list_procedures(&self, _ctx: &ExecutorContext) -> Result<ProcedureDetailResponse> {
        let metas = self
            .procedure_manager
            .list_procedures()
            .await
            .context(QueryProcedureSnafu)?;
        Ok(procedure::procedure_details_to_pb_response(metas))
    }
}
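Because `add_region_follower` and `remove_region_follower` ship with default bodies, an implementor only needs the five remaining methods. A sketch of a stub executor (the type is invented; every operation is rejected via the crate's `Unsupported` selector):

    struct NoopExecutor;

    #[async_trait::async_trait]
    impl ProcedureExecutor for NoopExecutor {
        async fn submit_ddl_task(
            &self,
            _ctx: &ExecutorContext,
            _request: SubmitDdlTaskRequest,
        ) -> Result<SubmitDdlTaskResponse> {
            UnsupportedSnafu { operation: "submit_ddl_task" }.fail()
        }

        async fn migrate_region(
            &self,
            _ctx: &ExecutorContext,
            _request: MigrateRegionRequest,
        ) -> Result<MigrateRegionResponse> {
            UnsupportedSnafu { operation: "migrate_region" }.fail()
        }

        async fn reconcile(
            &self,
            _ctx: &ExecutorContext,
            _request: ReconcileRequest,
        ) -> Result<ReconcileResponse> {
            UnsupportedSnafu { operation: "reconcile" }.fail()
        }

        async fn query_procedure_state(
            &self,
            _ctx: &ExecutorContext,
            _pid: &str,
        ) -> Result<ProcedureStateResponse> {
            UnsupportedSnafu { operation: "query_procedure_state" }.fail()
        }

        async fn list_procedures(&self, _ctx: &ExecutorContext) -> Result<ProcedureDetailResponse> {
            UnsupportedSnafu { operation: "list_procedures" }.fail()
        }
    }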
@@ -12,15 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-// TODO(weny): Remove it
-#[allow(dead_code)]
pub mod manager;
pub(crate) mod reconcile_catalog;
pub(crate) mod reconcile_database;
-// TODO(weny): Remove it
-#[allow(dead_code)]
-pub(crate) mod reconcile_table;
-// TODO(weny): Remove it
-#[allow(dead_code)]
pub(crate) mod reconcile_logical_tables;
pub(crate) mod reconcile_table;
pub(crate) mod utils;
src/common/meta/src/reconciliation/manager.rs (new file, 246 lines)
@@ -0,0 +1,246 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_procedure::{
    watcher, BoxedProcedure, ProcedureId, ProcedureManagerRef, ProcedureWithId,
};
use common_telemetry::{error, info, warn};
use snafu::{OptionExt, ResultExt};
use store_api::storage::TableId;
use table::table_name::TableName;
use table::table_reference::TableReference;

use crate::cache_invalidator::CacheInvalidatorRef;
use crate::error::{self, Result, TableNotFoundSnafu};
use crate::key::table_name::TableNameKey;
use crate::key::TableMetadataManagerRef;
use crate::node_manager::NodeManagerRef;
use crate::reconciliation::reconcile_catalog::ReconcileCatalogProcedure;
use crate::reconciliation::reconcile_database::{ReconcileDatabaseProcedure, DEFAULT_PARALLELISM};
use crate::reconciliation::reconcile_logical_tables::ReconcileLogicalTablesProcedure;
use crate::reconciliation::reconcile_table::resolve_column_metadata::ResolveStrategy;
use crate::reconciliation::reconcile_table::ReconcileTableProcedure;
use crate::reconciliation::utils::Context;

pub type ReconciliationManagerRef = Arc<ReconciliationManager>;

/// The manager for reconciliation procedures.
pub struct ReconciliationManager {
    procedure_manager: ProcedureManagerRef,
    context: Context,
}

macro_rules! register_reconcile_loader {
    ($self:ident, $procedure:ty) => {{
        let context = $self.context.clone();
        $self
            .procedure_manager
            .register_loader(
                <$procedure>::TYPE_NAME,
                Box::new(move |json| {
                    let context = context.clone();
                    let procedure = <$procedure>::from_json(context, json)?;
                    Ok(Box::new(procedure))
                }),
            )
            .context(error::RegisterProcedureLoaderSnafu {
                type_name: <$procedure>::TYPE_NAME,
            })?;
    }};
}

impl ReconciliationManager {
    pub fn new(
        node_manager: NodeManagerRef,
        table_metadata_manager: TableMetadataManagerRef,
        cache_invalidator: CacheInvalidatorRef,
        procedure_manager: ProcedureManagerRef,
    ) -> Self {
        Self {
            procedure_manager,
            context: Context {
                node_manager,
                table_metadata_manager,
                cache_invalidator,
            },
        }
    }

    /// Try to start the reconciliation manager.
    ///
    /// This function will register the procedure loaders for the reconciliation procedures.
    /// Returns an error if the procedure loaders are already registered.
    pub fn try_start(&self) -> Result<()> {
        register_reconcile_loader!(self, ReconcileLogicalTablesProcedure);
        register_reconcile_loader!(self, ReconcileTableProcedure);
        register_reconcile_loader!(self, ReconcileDatabaseProcedure);
        register_reconcile_loader!(self, ReconcileCatalogProcedure);

        Ok(())
    }

    /// Reconcile a table.
    ///
    /// Returns the procedure id of the reconciliation procedure.
    pub async fn reconcile_table(
        &self,
        table_ref: TableReference<'_>,
        resolve_strategy: ResolveStrategy,
    ) -> Result<ProcedureId> {
        let table_name_key =
            TableNameKey::new(table_ref.catalog, table_ref.schema, table_ref.table);
        let table_metadata_manager = &self.context.table_metadata_manager;
        let table_id = table_metadata_manager
            .table_name_manager()
            .get(table_name_key)
            .await?
            .with_context(|| TableNotFoundSnafu {
                table_name: table_ref.to_string(),
            })?
            .table_id();
        let (physical_table_id, _) = table_metadata_manager
            .table_route_manager()
            .get_physical_table_route(table_id)
            .await?;

        if physical_table_id == table_id {
            Ok(self.reconcile_physical_table(table_id, table_ref.into(), resolve_strategy))
        } else {
            let physical_table_info = table_metadata_manager
                .table_info_manager()
                .get(physical_table_id)
                .await?
                .with_context(|| TableNotFoundSnafu {
                    table_name: format!("table_id: {}", physical_table_id),
                })?;

            Ok(self.reconcile_logical_tables(
                physical_table_id,
                physical_table_info.table_name(),
                vec![(table_id, table_ref.into())],
            ))
        }
    }

    /// Reconcile a database.
    ///
    /// Returns the procedure id of the reconciliation procedure.
    pub fn reconcile_database(
        &self,
        catalog: String,
        schema: String,
        resolve_strategy: ResolveStrategy,
        parallelism: usize,
    ) -> ProcedureId {
        let parallelism = normalize_parallelism(parallelism);
        let procedure = ReconcileDatabaseProcedure::new(
            self.context.clone(),
            catalog,
            schema,
            false,
            parallelism,
            resolve_strategy,
            false,
        );
        self.spawn_procedure(Box::new(procedure))
    }

    fn reconcile_physical_table(
        &self,
        table_id: TableId,
        table_name: TableName,
        resolve_strategy: ResolveStrategy,
    ) -> ProcedureId {
        let procedure = ReconcileTableProcedure::new(
            self.context.clone(),
            table_id,
            table_name,
            resolve_strategy,
            false,
        );
        self.spawn_procedure(Box::new(procedure))
    }

    fn reconcile_logical_tables(
        &self,
        physical_table_id: TableId,
        physical_table_name: TableName,
        logical_tables: Vec<(TableId, TableName)>,
    ) -> ProcedureId {
        let procedure = ReconcileLogicalTablesProcedure::new(
            self.context.clone(),
            physical_table_id,
            physical_table_name,
            logical_tables,
            false,
        );
        self.spawn_procedure(Box::new(procedure))
    }

    /// Reconcile a catalog.
    ///
    /// Returns the procedure id of the reconciliation procedure.
    pub fn reconcile_catalog(
        &self,
        catalog: String,
        resolve_strategy: ResolveStrategy,
        parallelism: usize,
    ) -> ProcedureId {
        let parallelism = normalize_parallelism(parallelism);
        let procedure = ReconcileCatalogProcedure::new(
            self.context.clone(),
            catalog,
            false,
            resolve_strategy,
            parallelism,
        );
        self.spawn_procedure(Box::new(procedure))
    }

    fn spawn_procedure(&self, procedure: BoxedProcedure) -> ProcedureId {
        let procedure_manager = self.procedure_manager.clone();
        let procedure_with_id = ProcedureWithId::with_random_id(procedure);
        let procedure_id = procedure_with_id.id;
        common_runtime::spawn_global(async move {
            let watcher = &mut match procedure_manager.submit(procedure_with_id).await {
                Ok(watcher) => watcher,
                Err(e) => {
                    error!(e; "Failed to submit reconciliation procedure {procedure_id}");
                    return;
                }
            };
            if let Err(e) = watcher::wait(watcher).await {
                error!(e; "Failed to wait reconciliation procedure {procedure_id}");
                return;
            }

            info!("Reconciliation procedure {procedure_id} is finished successfully!");
        });
        procedure_id
    }
}

fn normalize_parallelism(parallelism: usize) -> usize {
    if parallelism == 0 {
        warn!(
            "Parallelism is 0, using default parallelism: {}",
            DEFAULT_PARALLELISM
        );
        DEFAULT_PARALLELISM
    } else {
        parallelism
    }
}
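A sketch of how a metasrv component might drive the manager, assuming the four handles are already constructed and that `TableReference::full` exists as used elsewhere in the codebase:

    let manager = ReconciliationManager::new(
        node_manager,
        table_metadata_manager,
        cache_invalidator,
        procedure_manager,
    );
    // Registers the four procedure loaders; errors if called twice.
    manager.try_start()?;

    // Fire-and-forget: the returned id can be polled through the
    // procedure-state APIs while `spawn_procedure` waits in the background.
    let procedure_id = manager
        .reconcile_table(
            TableReference::full("greptime", "public", "metrics"), // hypothetical table
            ResolveStrategy::UseLatest,
        )
        .await?;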
src/common/meta/src/reconciliation/reconcile_catalog.rs (new file, 207 lines)
@@ -0,0 +1,207 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::fmt::Debug;

use common_procedure::error::FromJsonSnafu;
use common_procedure::{
    Context as ProcedureContext, Error as ProcedureError, LockKey, Procedure, ProcedureId,
    Result as ProcedureResult, Status,
};
use futures::stream::BoxStream;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;

use crate::cache_invalidator::CacheInvalidatorRef;
use crate::error::Result;
use crate::key::TableMetadataManagerRef;
use crate::lock_key::CatalogLock;
use crate::node_manager::NodeManagerRef;
use crate::reconciliation::reconcile_catalog::start::ReconcileCatalogStart;
use crate::reconciliation::reconcile_database::utils::wait_for_inflight_subprocedures;
use crate::reconciliation::reconcile_table::resolve_column_metadata::ResolveStrategy;
use crate::reconciliation::utils::Context;

pub(crate) mod end;
pub(crate) mod reconcile_databases;
pub(crate) mod start;

pub(crate) struct ReconcileCatalogContext {
    pub node_manager: NodeManagerRef,
    pub table_metadata_manager: TableMetadataManagerRef,
    pub cache_invalidator: CacheInvalidatorRef,
    persistent_ctx: PersistentContext,
    volatile_ctx: VolatileContext,
}

impl ReconcileCatalogContext {
    pub fn new(ctx: Context, persistent_ctx: PersistentContext) -> Self {
        Self {
            node_manager: ctx.node_manager,
            table_metadata_manager: ctx.table_metadata_manager,
            cache_invalidator: ctx.cache_invalidator,
            persistent_ctx,
            volatile_ctx: VolatileContext::default(),
        }
    }

    pub(crate) async fn wait_for_inflight_subprocedure(
        &mut self,
        procedure_ctx: &ProcedureContext,
    ) -> Result<()> {
        if let Some(procedure_id) = self.volatile_ctx.inflight_subprocedure {
            wait_for_inflight_subprocedures(
                procedure_ctx,
                &[procedure_id],
                self.persistent_ctx.fast_fail,
            )
            .await?;
        }
        Ok(())
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct PersistentContext {
    catalog: String,
    fast_fail: bool,
    resolve_strategy: ResolveStrategy,
    parallelism: usize,
}

impl PersistentContext {
    pub fn new(
        catalog: String,
        fast_fail: bool,
        resolve_strategy: ResolveStrategy,
        parallelism: usize,
    ) -> Self {
        Self {
            catalog,
            fast_fail,
            resolve_strategy,
            parallelism,
        }
    }
}

#[derive(Default)]
pub(crate) struct VolatileContext {
    /// Stores the stream of schemas.
    schemas: Option<BoxStream<'static, Result<String>>>,
    /// Stores the inflight subprocedure.
    inflight_subprocedure: Option<ProcedureId>,
}

pub struct ReconcileCatalogProcedure {
    pub context: ReconcileCatalogContext,
    state: Box<dyn State>,
}

impl ReconcileCatalogProcedure {
    pub const TYPE_NAME: &'static str = "metasrv-procedure::ReconcileCatalog";

    pub fn new(
        ctx: Context,
        catalog: String,
        fast_fail: bool,
        resolve_strategy: ResolveStrategy,
        parallelism: usize,
    ) -> Self {
        let persistent_ctx =
            PersistentContext::new(catalog, fast_fail, resolve_strategy, parallelism);
        let context = ReconcileCatalogContext::new(ctx, persistent_ctx);
        let state = Box::new(ReconcileCatalogStart);
        Self { context, state }
    }

    pub(crate) fn from_json(ctx: Context, json: &str) -> ProcedureResult<Self> {
        let ProcedureDataOwned {
            state,
            persistent_ctx,
        } = serde_json::from_str(json).context(FromJsonSnafu)?;
        let context = ReconcileCatalogContext::new(ctx, persistent_ctx);
        Ok(Self { context, state })
    }
}

#[derive(Debug, Serialize)]
struct ProcedureData<'a> {
    state: &'a dyn State,
    persistent_ctx: &'a PersistentContext,
}

#[derive(Debug, Deserialize)]
struct ProcedureDataOwned {
    state: Box<dyn State>,
    persistent_ctx: PersistentContext,
}

#[async_trait::async_trait]
impl Procedure for ReconcileCatalogProcedure {
    fn type_name(&self) -> &str {
        Self::TYPE_NAME
    }

    async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
        let state = &mut self.state;

        match state.next(&mut self.context, _ctx).await {
            Ok((next, status)) => {
                *state = next;
                Ok(status)
            }
            Err(e) => {
                if e.is_retry_later() {
                    Err(ProcedureError::retry_later(e))
                } else {
                    Err(ProcedureError::external(e))
                }
            }
        }
    }

    fn dump(&self) -> ProcedureResult<String> {
        let data = ProcedureData {
            state: self.state.as_ref(),
            persistent_ctx: &self.context.persistent_ctx,
        };
        serde_json::to_string(&data).context(FromJsonSnafu)
    }

    fn lock_key(&self) -> LockKey {
        let catalog = &self.context.persistent_ctx.catalog;

        LockKey::new(vec![CatalogLock::Write(catalog).into()])
    }
}

#[async_trait::async_trait]
#[typetag::serde(tag = "reconcile_catalog_state")]
pub(crate) trait State: Sync + Send + Debug {
    fn name(&self) -> &'static str {
        let type_name = std::any::type_name::<Self>();
        // short name
        type_name.split("::").last().unwrap_or(type_name)
    }

    async fn next(
        &mut self,
        ctx: &mut ReconcileCatalogContext,
        procedure_ctx: &ProcedureContext,
    ) -> Result<(Box<dyn State>, Status)>;

    fn as_any(&self) -> &dyn Any;
}
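The `#[typetag::serde(tag = "reconcile_catalog_state")]` attribute is what lets the `Box<dyn State>` inside `ProcedureDataOwned` survive a JSON round trip: the tag field records the concrete type so `from_json` can rebuild the right state on recovery. A toy sketch of the mechanism (trait and type invented for illustration):

    use serde::{Deserialize, Serialize};

    #[typetag::serde(tag = "step")]
    trait Step: std::fmt::Debug {}

    #[derive(Debug, Serialize, Deserialize)]
    struct Start;

    #[typetag::serde]
    impl Step for Start {}

    fn main() -> serde_json::Result<()> {
        let boxed: Box<dyn Step> = Box::new(Start);
        // Serializes to something like {"step":"Start"}.
        let json = serde_json::to_string(&boxed)?;
        let back: Box<dyn Step> = serde_json::from_str(&json)?;
        println!("{json} -> {back:?}");
        Ok(())
    }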
src/common/meta/src/reconciliation/reconcile_catalog/end.rs (new file, 40 lines)
@@ -0,0 +1,40 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;

use common_procedure::{Context as ProcedureContext, Status};
use serde::{Deserialize, Serialize};

use crate::error::Result;
use crate::reconciliation::reconcile_catalog::{ReconcileCatalogContext, State};

#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct ReconcileCatalogEnd;

#[async_trait::async_trait]
#[typetag::serde]
impl State for ReconcileCatalogEnd {
    async fn next(
        &mut self,
        _ctx: &mut ReconcileCatalogContext,
        _procedure_ctx: &ProcedureContext,
    ) -> Result<(Box<dyn State>, Status)> {
        Ok((Box::new(ReconcileCatalogEnd), Status::done()))
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
@@ -0,0 +1,99 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;

use common_procedure::{Context as ProcedureContext, ProcedureWithId, Status};
use common_telemetry::info;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};

use crate::error::Result;
use crate::reconciliation::reconcile_catalog::end::ReconcileCatalogEnd;
use crate::reconciliation::reconcile_catalog::{ReconcileCatalogContext, State};
use crate::reconciliation::reconcile_database::ReconcileDatabaseProcedure;
use crate::reconciliation::utils::Context;

#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct ReconcileDatabases;

#[async_trait::async_trait]
#[typetag::serde]
impl State for ReconcileDatabases {
    async fn next(
        &mut self,
        ctx: &mut ReconcileCatalogContext,
        procedure_ctx: &ProcedureContext,
    ) -> Result<(Box<dyn State>, Status)> {
        // Waits for the inflight subprocedure first.
        ctx.wait_for_inflight_subprocedure(procedure_ctx).await?;

        if ctx.volatile_ctx.schemas.as_deref().is_none() {
            let schemas = ctx
                .table_metadata_manager
                .schema_manager()
                .schema_names(&ctx.persistent_ctx.catalog);
            ctx.volatile_ctx.schemas = Some(schemas);
        }

        if let Some(catalog) = ctx
            .volatile_ctx
            .schemas
            .as_mut()
            .unwrap()
            .try_next()
            .await?
        {
            return Self::schedule_reconcile_database(ctx, catalog);
        }

        Ok((Box::new(ReconcileCatalogEnd), Status::executing(false)))
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}

impl ReconcileDatabases {
    fn schedule_reconcile_database(
        ctx: &mut ReconcileCatalogContext,
        schema: String,
    ) -> Result<(Box<dyn State>, Status)> {
        let context = Context {
            node_manager: ctx.node_manager.clone(),
            table_metadata_manager: ctx.table_metadata_manager.clone(),
            cache_invalidator: ctx.cache_invalidator.clone(),
        };
        info!(
            "Scheduling reconcile database: {}, catalog: {}",
            schema, ctx.persistent_ctx.catalog
        );
        let procedure = ReconcileDatabaseProcedure::new(
            context,
            ctx.persistent_ctx.catalog.clone(),
            schema,
            ctx.persistent_ctx.fast_fail,
            ctx.persistent_ctx.parallelism,
            ctx.persistent_ctx.resolve_strategy,
            true,
        );
        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

        Ok((
            Box::new(ReconcileDatabases),
            Status::suspended(vec![procedure_with_id], false),
        ))
    }
}
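Note the control flow above: the state pulls at most one schema per `next` call and then suspends on a subprocedure, so progress is checkpointed between steps. The underlying stream idiom, reduced to a plain loop in a runnable sketch:

    use futures::stream::{self, BoxStream};
    use futures::TryStreamExt;

    async fn drive(
        mut schemas: BoxStream<'static, Result<String, std::io::Error>>,
    ) -> Result<(), std::io::Error> {
        // One item per `next` invocation in the real procedure; a loop here.
        while let Some(schema) = schemas.try_next().await? {
            println!("would schedule ReconcileDatabase for {schema}");
        }
        Ok(())
    }

    #[tokio::main]
    async fn main() -> Result<(), std::io::Error> {
        let names: Vec<Result<String, std::io::Error>> =
            vec![Ok("public".to_string()), Ok("system".to_string())];
        drive(Box::pin(stream::iter(names))).await
    }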
@@ -0,0 +1,58 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;

use common_procedure::{Context as ProcedureContext, Status};
use serde::{Deserialize, Serialize};
use snafu::ensure;

use crate::error::{self, Result};
use crate::key::catalog_name::CatalogNameKey;
use crate::reconciliation::reconcile_catalog::reconcile_databases::ReconcileDatabases;
use crate::reconciliation::reconcile_catalog::{ReconcileCatalogContext, State};

#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct ReconcileCatalogStart;

#[async_trait::async_trait]
#[typetag::serde]
impl State for ReconcileCatalogStart {
    async fn next(
        &mut self,
        ctx: &mut ReconcileCatalogContext,
        _procedure_ctx: &ProcedureContext,
    ) -> Result<(Box<dyn State>, Status)> {
        let exists = ctx
            .table_metadata_manager
            .catalog_manager()
            .exists(CatalogNameKey {
                catalog: &ctx.persistent_ctx.catalog,
            })
            .await?;

        ensure!(
            exists,
            error::CatalogNotFoundSnafu {
                catalog: &ctx.persistent_ctx.catalog
            },
        );

        Ok((Box::new(ReconcileDatabases), Status::executing(true)))
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
@@ -45,6 +45,8 @@ use crate::reconciliation::reconcile_database::utils::wait_for_inflight_subproce
use crate::reconciliation::reconcile_table::resolve_column_metadata::ResolveStrategy;
use crate::reconciliation::utils::Context;

pub(crate) const DEFAULT_PARALLELISM: usize = 64;

pub(crate) struct ReconcileDatabaseContext {
    pub node_manager: NodeManagerRef,
    pub table_metadata_manager: TableMetadataManagerRef,

@@ -89,6 +91,7 @@ pub(crate) struct PersistentContext {
    fail_fast: bool,
    parallelism: usize,
    resolve_strategy: ResolveStrategy,
    is_subprocedure: bool,
}

impl PersistentContext {

@@ -98,6 +101,7 @@ impl PersistentContext {
        fail_fast: bool,
        parallelism: usize,
        resolve_strategy: ResolveStrategy,
        is_subprocedure: bool,
    ) -> Self {
        Self {
            catalog,

@@ -105,6 +109,7 @@ impl PersistentContext {
            fail_fast,
            parallelism,
            resolve_strategy,
            is_subprocedure,
        }
    }
}

@@ -139,9 +144,16 @@ impl ReconcileDatabaseProcedure {
        fail_fast: bool,
        parallelism: usize,
        resolve_strategy: ResolveStrategy,
        is_subprocedure: bool,
    ) -> Self {
-        let persistent_ctx =
-            PersistentContext::new(catalog, schema, fail_fast, parallelism, resolve_strategy);
        let persistent_ctx = PersistentContext::new(
            catalog,
            schema,
            fail_fast,
            parallelism,
            resolve_strategy,
            is_subprocedure,
        );
        let context = ReconcileDatabaseContext::new(ctx, persistent_ctx);
        let state = Box::new(ReconcileDatabaseStart);
        Self { context, state }

@@ -204,6 +216,10 @@ impl Procedure for ReconcileDatabaseProcedure {
    fn lock_key(&self) -> LockKey {
        let catalog = &self.context.persistent_ctx.catalog;
        let schema = &self.context.persistent_ctx.schema;
        // If the procedure is a subprocedure, only lock the schema.
        if self.context.persistent_ctx.is_subprocedure {
            return LockKey::new(vec![SchemaLock::write(catalog, schema).into()]);
        }

        LockKey::new(vec![
            CatalogLock::Read(catalog).into(),
@@ -28,6 +28,7 @@ use crate::error::{Result, TableInfoNotFoundSnafu};
use crate::key::table_route::TableRouteValue;
use crate::reconciliation::reconcile_database::end::ReconcileDatabaseEnd;
use crate::reconciliation::reconcile_database::{ReconcileDatabaseContext, State};
use crate::reconciliation::reconcile_logical_tables::ReconcileLogicalTablesProcedure;
use crate::reconciliation::utils::Context;

#[derive(Debug, Serialize, Deserialize)]

@@ -201,7 +202,7 @@ impl ReconcileLogicalTables {
    async fn build_reconcile_logical_tables_procedure(
        ctx: &Context,
        physical_table_id: TableId,
-        _logical_tables: Vec<(TableId, TableName)>,
        logical_tables: Vec<(TableId, TableName)>,
    ) -> Result<ProcedureWithId> {
        let table_info = ctx
            .table_metadata_manager

@@ -212,8 +213,16 @@ impl ReconcileLogicalTables {
                table: format!("table_id: {}", physical_table_id),
            })?;

-        let _physical_table_name = table_info.table_name();
-        todo!()
        let physical_table_name = table_info.table_name();
        let procedure = ReconcileLogicalTablesProcedure::new(
            ctx.clone(),
            physical_table_id,
            physical_table_name,
            logical_tables,
            true,
        );

        Ok(ProcedureWithId::with_random_id(Box::new(procedure)))
    }

    fn enqueue_logical_table(
@@ -31,18 +31,29 @@ use crate::reconciliation::utils::{
};

/// Strategy for resolving column metadata inconsistencies.
-#[derive(Debug, Serialize, Deserialize, Clone, Copy)]
-pub(crate) enum ResolveStrategy {
-    /// Always uses the column metadata from metasrv.
-    UseMetasrv,
-
#[derive(Debug, Serialize, Deserialize, Clone, Copy, Default)]
pub enum ResolveStrategy {
    #[default]
    /// Trusts the latest column metadata from datanode.
    UseLatest,

    /// Always uses the column metadata from metasrv.
    UseMetasrv,

    /// Aborts the resolution process if inconsistencies are detected.
    AbortOnConflict,
}

impl From<api::v1::meta::ResolveStrategy> for ResolveStrategy {
    fn from(strategy: api::v1::meta::ResolveStrategy) -> Self {
        match strategy {
            api::v1::meta::ResolveStrategy::UseMetasrv => Self::UseMetasrv,
            api::v1::meta::ResolveStrategy::UseLatest => Self::UseLatest,
            api::v1::meta::ResolveStrategy::AbortOnConflict => Self::AbortOnConflict,
        }
    }
}

/// State responsible for resolving inconsistencies in column metadata across physical regions.
#[derive(Debug, Serialize, Deserialize)]
pub struct ResolveColumnMetadata {
@@ -22,14 +22,12 @@ use snafu::{ensure, OptionExt};
use store_api::metadata::{ColumnMetadata, RegionMetadata};
use store_api::storage::{RegionId, TableId};
use table::metadata::{RawTableInfo, RawTableMeta};
use table::table_name::TableName;
use table::table_reference::TableReference;

use crate::cache_invalidator::CacheInvalidatorRef;
use crate::error::{
    self, MismatchColumnIdSnafu, MissingColumnInColumnMetadataSnafu, Result, UnexpectedSnafu,
    MismatchColumnIdSnafu, MissingColumnInColumnMetadataSnafu, Result, UnexpectedSnafu,
};
use crate::key::table_name::{TableNameKey, TableNameManager};
use crate::key::TableMetadataManagerRef;
use crate::node_manager::NodeManagerRef;
@@ -397,87 +395,6 @@ pub(crate) fn build_table_meta_from_column_metadatas(
    Ok(new_raw_table_meta)
}

/// Validates the table id and name consistency.
///
/// It will check the table id and table name consistency.
/// If the table id and table name are not consistent, it will return an error.
pub(crate) async fn validate_table_id_and_name(
    table_name_manager: &TableNameManager,
    table_id: TableId,
    table_name: &TableName,
) -> Result<()> {
    let table_name_key = TableNameKey::new(
        &table_name.catalog_name,
        &table_name.schema_name,
        &table_name.table_name,
    );
    let table_name_value = table_name_manager
        .get(table_name_key)
        .await?
        .with_context(|| error::TableNotFoundSnafu {
            table_name: table_name.to_string(),
        })?;

    ensure!(
        table_name_value.table_id() == table_id,
        error::UnexpectedSnafu {
            err_msg: format!(
                "The table id mismatch for table: {}, expected {}, actual {}",
                table_name,
                table_id,
                table_name_value.table_id()
            ),
        }
    );

    Ok(())
}

/// Checks whether the column metadata invariants hold for the logical table.
///
/// Invariants:
/// - Primary key (Tag) columns must exist in the new metadata.
/// - Timestamp column must remain exactly the same in name and ID.
///
/// TODO(weny): add tests
pub(crate) fn check_column_metadatas_invariants_for_logical_table(
    column_metadatas: &[ColumnMetadata],
    table_info: &RawTableInfo,
) -> bool {
    let new_primary_keys = column_metadatas
        .iter()
        .filter(|c| c.semantic_type == SemanticType::Tag)
        .map(|c| c.column_schema.name.as_str())
        .collect::<HashSet<_>>();

    let old_primary_keys = table_info
        .meta
        .primary_key_indices
        .iter()
        .map(|i| table_info.meta.schema.column_schemas[*i].name.as_str());

    for name in old_primary_keys {
        if !new_primary_keys.contains(name) {
            return false;
        }
    }

    let old_timestamp_column_name = table_info
        .meta
        .schema
        .column_schemas
        .iter()
        .find(|c| c.is_time_index())
        .map(|c| c.name.as_str());

    let new_timestamp_column_name = column_metadatas
        .iter()
        .find(|c| c.semantic_type == SemanticType::Timestamp)
        .map(|c| c.column_schema.name.as_str());

    old_timestamp_column_name != new_timestamp_column_name
}

/// Returns true if the logical table info needs to be updated.
///
/// A logical table only supports adding columns, so we can check the length of the column metadatas
@@ -18,6 +18,7 @@ pub mod trigger;
use std::collections::{HashMap, HashSet};
use std::result;

use api::helper::{from_pb_time_ranges, to_pb_time_ranges};
use api::v1::alter_database_expr::Kind as PbAlterDatabaseKind;
use api::v1::meta::ddl_task_request::Task;
use api::v1::meta::{
@@ -38,7 +39,8 @@ use api::v1::{
};
use base64::engine::general_purpose;
use base64::Engine as _;
use common_time::{DatabaseTimeToLive, Timezone};
use common_error::ext::BoxedError;
use common_time::{DatabaseTimeToLive, Timestamp, Timezone};
use prost::Message;
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DefaultOnNull};
@@ -49,8 +51,8 @@ use table::table_name::TableName;
use table::table_reference::TableReference;

use crate::error::{
    self, InvalidSetDatabaseOptionSnafu, InvalidTimeZoneSnafu, InvalidUnsetDatabaseOptionSnafu,
    Result,
    self, ConvertTimeRangesSnafu, ExternalSnafu, InvalidSetDatabaseOptionSnafu,
    InvalidTimeZoneSnafu, InvalidUnsetDatabaseOptionSnafu, Result,
};
use crate::key::FlowId;

@@ -179,12 +181,14 @@ impl DdlTask {
        schema: String,
        table: String,
        table_id: TableId,
        time_ranges: Vec<(Timestamp, Timestamp)>,
    ) -> Self {
        DdlTask::TruncateTable(TruncateTableTask {
            catalog,
            schema,
            table,
            table_id,
            time_ranges,
        })
    }

@@ -826,6 +830,7 @@ pub struct TruncateTableTask {
    pub schema: String,
    pub table: String,
    pub table_id: TableId,
    pub time_ranges: Vec<(Timestamp, Timestamp)>,
}

impl TruncateTableTask {
@@ -864,6 +869,13 @@ impl TryFrom<PbTruncateTableTask> for TruncateTableTask {
                    err_msg: "expected table_id",
                })?
                .id,
            time_ranges: truncate_table
                .time_ranges
                .map(from_pb_time_ranges)
                .transpose()
                .map_err(BoxedError::new)
                .context(ExternalSnafu)?
                .unwrap_or_default(),
        })
    }
}
@@ -878,6 +890,9 @@ impl TryFrom<TruncateTableTask> for PbTruncateTableTask {
            schema_name: task.schema,
            table_name: task.table,
            table_id: Some(api::v1::TableId { id: task.table_id }),
            time_ranges: Some(
                to_pb_time_ranges(&task.time_ranges).context(ConvertTimeRangesSnafu)?,
            ),
        }),
    })
}
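With the new field, a partial truncate can be expressed directly on the task. A hedged sketch of constructing one (the field values and the Timestamp::new_second constructor are illustrative assumptions, not taken from the diff):

// Sketch only: truncate one hour of data instead of the whole table.
let task = TruncateTableTask {
    catalog: "greptime".to_string(),
    schema: "public".to_string(),
    table: "metrics".to_string(),
    table_id: 1024,
    time_ranges: vec![(Timestamp::new_second(0), Timestamp::new_second(3600))],
};
// Presumably an empty `time_ranges` keeps the old behavior of truncating
// everything (see RegionTruncateRequest::All further below).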
@@ -15,7 +15,7 @@
use std::ops::Range;
use std::sync::Arc;

use common_telemetry::warn;
use common_telemetry::{debug, warn};
use snafu::ensure;
use tokio::sync::Mutex;

@@ -95,13 +95,27 @@ impl Sequence {
        inner.initial..inner.max
    }

    /// Returns the next value without incrementing the sequence.
    pub async fn peek(&self) -> u64 {
    /// Returns the current value stored in the remote storage without incrementing the sequence.
    ///
    /// This function always fetches the true current state from the remote storage (KV backend),
    /// ignoring any local cache to provide the most accurate view of the sequence's remote state.
    /// It does not consume or advance the sequence value.
    ///
    /// Note: Since this always queries the remote storage, it may be slower than `next()` but
    /// provides the most accurate and up-to-date information about the sequence state.
    pub async fn peek(&self) -> Result<u64> {
        let inner = self.inner.lock().await;
        inner.next
        inner.peek().await
    }

    /// Jumps to the given value.
    ///
    /// The next value must be greater than both:
    /// 1. The current local next value
    /// 2. The current value stored in the remote storage (KV backend)
    ///
    /// This ensures the sequence can only move forward and maintains consistency
    /// across different instances accessing the same sequence.
    pub async fn jump_to(&self, next: u64) -> Result<()> {
        let mut inner = self.inner.lock().await;
        inner.jump_to(next).await
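A short usage sketch of how the documented peek()/next()/jump_to() semantics compose. It is illustrative only, built on the same SequenceBuilder and MemoryKvBackend helpers the tests below use, and it assumes jump_to() writes the target value to the backend, as its compare-and-put further below implies:

async fn demo() -> Result<()> {
    let seq = SequenceBuilder::new("demo_seq", Arc::new(MemoryKvBackend::default()))
        .initial(100)
        .step(10)
        .build();
    assert_eq!(seq.peek().await?, 100); // nothing stored yet: the initial value
    assert_eq!(seq.next().await?, 100); // allocates [100, 110) and stores 110 remotely
    seq.jump_to(500).await?; // must exceed both the local and the remote state
    assert_eq!(seq.peek().await?, 500); // peek() always reflects the remote value
    Ok(())
}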
@@ -136,6 +150,7 @@ impl Inner {
            if range.contains(&self.next) {
                let res = Ok(self.next);
                self.next += 1;
                debug!("sequence {} next: {}", self.name, self.next);
                return res;
            }
            self.range = None;
@@ -144,6 +159,10 @@ impl Inner {
            let range = self.next_range().await?;
            self.next = range.start;
            self.range = Some(range);
            debug!(
                "sequence {} next: {}, range: {:?}",
                self.name, self.next, self.range
            );
        }
    }
}
@@ -154,6 +173,26 @@ impl Inner {
        .fail()
    }

    /// Returns the current value from remote storage without advancing the sequence.
    /// If no value exists in remote storage, returns the initial value.
    pub async fn peek(&self) -> Result<u64> {
        let key = self.name.as_bytes();
        let value = self.generator.get(key).await?.map(|kv| kv.value);
        let next = if let Some(value) = value {
            let next = self.initial.max(self.parse_sequence_value(value)?);
            debug!("The next value of sequence {} is {}", self.name, next);
            next
        } else {
            debug!(
                "The next value of sequence {} is not set, use initial value {}",
                self.name, self.initial
            );
            self.initial
        };

        Ok(next)
    }

    pub async fn next_range(&self) -> Result<Range<u64>> {
        let key = self.name.as_bytes();
        let mut start = self.next;
@@ -187,16 +226,7 @@ impl Inner {

        if !res.success {
            if let Some(kv) = res.prev_kv {
                let v: [u8; 8] = match kv.value.clone().try_into() {
                    Ok(a) => a,
                    Err(v) => {
                        return error::UnexpectedSequenceValueSnafu {
                            err_msg: format!("Not a valid u64 for '{}': {v:?}", self.name),
                        }
                        .fail()
                    }
                };
                let v = u64::from_le_bytes(v);
                let v = self.parse_sequence_value(kv.value.clone())?;
                // If the existing value is smaller than the initial, we should start from the initial.
                start = v.max(self.initial);
                expect = kv.value;
@@ -220,25 +250,33 @@ impl Inner {
    }

    /// Jumps to the given value.
    /// The next value must be greater than the current next value.
    ///
    /// The next value must be greater than both:
    /// 1. The current local next value (self.next)
    /// 2. The current value stored in the remote storage (KV backend)
    ///
    /// This ensures the sequence can only move forward and maintains consistency
    /// across different instances accessing the same sequence.
    pub async fn jump_to(&mut self, next: u64) -> Result<()> {
        let key = self.name.as_bytes();
        let current = self.generator.get(key).await?.map(|kv| kv.value);

        let curr_val = match &current {
            Some(val) => self.initial.max(self.parse_sequence_value(val.clone())?),
            None => self.initial,
        };

        ensure!(
            next > self.next,
            next > curr_val,
            error::UnexpectedSnafu {
                err_msg: format!(
                    "The next value {} is not greater than the current next value {}",
                    next, self.next
                    next, curr_val
                ),
            }
        );

        let key = self.name.as_bytes();
        let expect = self
            .generator
            .get(key)
            .await?
            .map(|kv| kv.value)
            .unwrap_or_default();
        let expect = current.unwrap_or_default();

        let req = CompareAndPutRequest {
            key: key.to_vec(),
@@ -260,6 +298,20 @@ impl Inner {

        Ok(())
    }

    /// Converts a Vec<u8> to u64 with proper error handling for sequence values.
    fn parse_sequence_value(&self, value: Vec<u8>) -> Result<u64> {
        let v: [u8; 8] = match value.try_into() {
            Ok(a) => a,
            Err(v) => {
                return error::UnexpectedSequenceValueSnafu {
                    err_msg: format!("Not a valid u64 for '{}': {v:?}", self.name),
                }
                .fail()
            }
        };
        Ok(u64::from_le_bytes(v))
    }
}

#[cfg(test)]
@@ -388,7 +440,7 @@ mod tests {
    }

    #[tokio::test]
    async fn test_sequence_out_of_rage() {
    async fn test_sequence_out_of_range() {
        let seq = SequenceBuilder::new("test_seq", Arc::new(MemoryKvBackend::default()))
            .initial(u64::MAX - 10)
            .step(10)
@@ -458,4 +510,139 @@ mod tests {
        let next = seq.next().await;
        assert!(next.is_err());
    }

    #[tokio::test]
    async fn test_sequence_peek() {
        common_telemetry::init_default_ut_logging();
        let kv_backend = Arc::new(MemoryKvBackend::default());
        let seq = SequenceBuilder::new("test_seq", kv_backend.clone())
            .step(10)
            .initial(1024)
            .build();
        // The sequence value in the kv backend is not set, so the peek value should be the initial value.
        assert_eq!(seq.peek().await.unwrap(), 1024);

        for i in 0..11 {
            let v = seq.next().await.unwrap();
            assert_eq!(v, 1024 + i);
        }
        let seq = SequenceBuilder::new("test_seq", kv_backend)
            .initial(1024)
            .build();
        // This instance has no local state yet, so peek() fetches the value from the kv backend.
        assert_eq!(seq.peek().await.unwrap(), 1044);
    }

    #[tokio::test]
    async fn test_sequence_peek_shared_storage() {
        let kv_backend = Arc::new(MemoryKvBackend::default());
        let shared_seq = "shared_seq";

        // Create two sequence instances with the SAME name but DIFFERENT configs.
        let seq1 = SequenceBuilder::new(shared_seq, kv_backend.clone())
            .initial(100)
            .step(5)
            .build();
        let seq2 = SequenceBuilder::new(shared_seq, kv_backend.clone())
            .initial(200) // different initial
            .step(3) // different step
            .build();

        // Initially both return their own initial values when no remote value exists.
        assert_eq!(seq1.peek().await.unwrap(), 100);
        assert_eq!(seq2.peek().await.unwrap(), 200);

        // seq1 calls next() to allocate a range and update the remote storage.
        assert_eq!(seq1.next().await.unwrap(), 100);
        // After seq1.next(), the remote storage holds 100 + seq1.step(5) = 105.

        // peek() returns max(initial, remote): seq1 now sees the remote value (105),
        // while seq2 still sees its own, larger initial value (200).
        assert_eq!(seq1.peek().await.unwrap(), 105);
        assert_eq!(seq2.peek().await.unwrap(), 200);

        // seq2 calls next(); it starts from its initial value (200).
        assert_eq!(seq2.next().await.unwrap(), 200);
        // After seq2.next(), the remote storage is updated to 200 + seq2.step(3) = 203.

        // Both now see the new remote value (seq2's step was used).
        assert_eq!(seq1.peek().await.unwrap(), 203);
        assert_eq!(seq2.peek().await.unwrap(), 203);

        // seq1 continues from its local range (101..=104), then fetches a new range
        // starting at the remote value (203).
        assert_eq!(seq1.next().await.unwrap(), 101);
        assert_eq!(seq1.next().await.unwrap(), 102);
        assert_eq!(seq1.next().await.unwrap(), 103);
        assert_eq!(seq1.next().await.unwrap(), 104);
        assert_eq!(seq1.next().await.unwrap(), 203);
        // After that next(), the remote storage is updated to 203 + seq1.step(5) = 208.
        assert_eq!(seq1.peek().await.unwrap(), 208);
        assert_eq!(seq2.peek().await.unwrap(), 208);
    }

    #[tokio::test]
    async fn test_sequence_peek_initial_max_logic() {
        let kv_backend = Arc::new(MemoryKvBackend::default());

        // Manually set a small value in storage.
        let key = seq_name("test_max").into_bytes();
        kv_backend
            .put(
                PutRequest::new()
                    .with_key(key)
                    .with_value(u64::to_le_bytes(50)),
            )
            .await
            .unwrap();

        // Create a sequence with a larger initial value.
        let seq = SequenceBuilder::new("test_max", kv_backend)
            .initial(100) // larger than the remote value (50)
            .build();

        // peek() should return max(initial, remote) = max(100, 50) = 100.
        assert_eq!(seq.peek().await.unwrap(), 100);

        // next() should start from the larger initial value.
        assert_eq!(seq.next().await.unwrap(), 100);
    }

    #[tokio::test]
    async fn test_sequence_initial_greater_than_storage() {
        let kv_backend = Arc::new(MemoryKvBackend::default());

        // Test sequence behavior when initial > storage value.
        // This verifies the max(storage, initial) logic works correctly.

        // Step 1: Establish a low value in storage.
        let seq1 = SequenceBuilder::new("max_test", kv_backend.clone())
            .initial(10)
            .step(5)
            .build();
        assert_eq!(seq1.next().await.unwrap(), 10); // storage: 15

        // Step 2: Create a sequence with a much larger initial.
        let seq2 = SequenceBuilder::new("max_test", kv_backend.clone())
            .initial(100) // much larger than storage (15)
            .step(5)
            .build();

        // seq2 should start from max(15, 100) = 100 (its initial value).
        assert_eq!(seq2.next().await.unwrap(), 100); // storage updated to: 105
        assert_eq!(seq2.peek().await.unwrap(), 105);

        // Step 3: Verify subsequent sequences continue from the updated storage.
        let seq3 = SequenceBuilder::new("max_test", kv_backend)
            .initial(50) // smaller than current storage (105)
            .step(1)
            .build();

        // seq3 should use max(105, 50) = 105 (the storage value).
        assert_eq!(seq3.peek().await.unwrap(), 105);
        assert_eq!(seq3.next().await.unwrap(), 105); // storage: 106

        // This demonstrates the correct max(storage, initial) behavior:
        // - Sequences never generate values below their initial requirement.
        // - Storage always reflects the highest allocated value.
        // - Value gaps (15-99) are acceptable to maintain minimum constraints.
    }
}

@@ -13,3 +13,4 @@
// limitations under the License.

pub mod datanode;
pub mod memory;

src/common/options/src/memory.rs (Normal file, 33 lines)
@@ -0,0 +1,33 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct MemoryOptions {
    /// Whether to enable heap profiling activation.
    /// When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
    /// is set to "prof:true,prof_active:false". The official image adds this env variable.
    /// Default is true.
    pub enable_heap_profiling: bool,
}

impl Default for MemoryOptions {
    fn default() -> Self {
        Self {
            enable_heap_profiling: true,
        }
    }
}
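Because MemoryOptions derives Deserialize with #[serde(default)], the flag can be omitted or overridden in a config file. A hedged sketch of how that plays out (assumes the toml crate, which Rust config stacks of this shape commonly use):

// Sketch: both forms parse; a missing key falls back to the Default impl.
let disabled: MemoryOptions = toml::from_str("enable_heap_profiling = false").unwrap();
assert!(!disabled.enable_heap_profiling);

let defaulted: MemoryOptions = toml::from_str("").unwrap();
assert!(defaulted.enable_heap_profiling);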
@@ -11,11 +11,13 @@ testing = []
workspace = true

[dependencies]
api.workspace = true
async-stream.workspace = true
async-trait.workspace = true
backon.workspace = true
common-base.workspace = true
common-error.workspace = true
common-event-recorder.workspace = true
common-macro.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true

src/common/procedure/src/event.rs (Normal file, 116 lines)
@@ -0,0 +1,116 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;

use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Row, SemanticType};
use common_event_recorder::error::Result;
use common_event_recorder::Event;
use common_time::timestamp::{TimeUnit, Timestamp};

use crate::{ProcedureId, ProcedureState};

pub const EVENTS_TABLE_PROCEDURE_ID_COLUMN_NAME: &str = "procedure_id";
pub const EVENTS_TABLE_PROCEDURE_STATE_COLUMN_NAME: &str = "procedure_state";
pub const EVENTS_TABLE_PROCEDURE_ERROR_COLUMN_NAME: &str = "procedure_error";

/// `ProcedureEvent` represents an event emitted by a procedure during its execution lifecycle.
#[derive(Debug)]
pub struct ProcedureEvent {
    /// Unique identifier associated with the originating procedure instance.
    pub procedure_id: ProcedureId,
    /// The timestamp of the event.
    pub timestamp: Timestamp,
    /// The state of the procedure.
    pub state: ProcedureState,
    /// The event emitted by the procedure. It's generated by [Procedure::event].
    pub internal_event: Box<dyn Event>,
}

impl ProcedureEvent {
    pub fn new(
        procedure_id: ProcedureId,
        internal_event: Box<dyn Event>,
        state: ProcedureState,
    ) -> Self {
        Self {
            procedure_id,
            internal_event,
            timestamp: Timestamp::current_time(TimeUnit::Nanosecond),
            state,
        }
    }
}

impl Event for ProcedureEvent {
    fn event_type(&self) -> &str {
        self.internal_event.event_type()
    }

    fn timestamp(&self) -> Timestamp {
        self.timestamp
    }

    fn json_payload(&self) -> Result<String> {
        self.internal_event.json_payload()
    }

    fn extra_schema(&self) -> Vec<ColumnSchema> {
        let mut schema = vec![
            ColumnSchema {
                column_name: EVENTS_TABLE_PROCEDURE_ID_COLUMN_NAME.to_string(),
                datatype: ColumnDataType::String.into(),
                semantic_type: SemanticType::Field.into(),
                ..Default::default()
            },
            ColumnSchema {
                column_name: EVENTS_TABLE_PROCEDURE_STATE_COLUMN_NAME.to_string(),
                datatype: ColumnDataType::String.into(),
                semantic_type: SemanticType::Field.into(),
                ..Default::default()
            },
            ColumnSchema {
                column_name: EVENTS_TABLE_PROCEDURE_ERROR_COLUMN_NAME.to_string(),
                datatype: ColumnDataType::String.into(),
                semantic_type: SemanticType::Field.into(),
                ..Default::default()
            },
        ];
        schema.append(&mut self.internal_event.extra_schema());
        schema
    }

    fn extra_row(&self) -> Result<Row> {
        let error_str = match &self.state {
            ProcedureState::Failed { error } => format!("{:?}", error),
            ProcedureState::PrepareRollback { error } => format!("{:?}", error),
            ProcedureState::RollingBack { error } => format!("{:?}", error),
            ProcedureState::Retrying { error } => format!("{:?}", error),
            ProcedureState::Poisoned { error, .. } => format!("{:?}", error),
            _ => "".to_string(),
        };
        let mut row = vec![
            ValueData::StringValue(self.procedure_id.to_string()).into(),
            ValueData::StringValue(self.state.as_str_name().to_string()).into(),
            ValueData::StringValue(error_str).into(),
        ];
        row.append(&mut self.internal_event.extra_row()?.values);
        Ok(Row { values: row })
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
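To see how ProcedureEvent composes schema and row, consider a minimal inner event. Everything below is an illustrative sketch, not part of the diff: the type name and empty payload are assumptions, and it relies only on the trait surface shown above.

// Hypothetical inner event with no extra columns of its own.
#[derive(Debug)]
struct NoopEvent;

impl Event for NoopEvent {
    fn event_type(&self) -> &str { "noop" }
    fn timestamp(&self) -> Timestamp { Timestamp::current_time(TimeUnit::Nanosecond) }
    fn json_payload(&self) -> Result<String> { Ok("{}".to_string()) }
    fn extra_schema(&self) -> Vec<ColumnSchema> { vec![] }
    fn extra_row(&self) -> Result<Row> { Ok(Row { values: vec![] }) }
    fn as_any(&self) -> &dyn Any { self }
}

// The wrapper prepends procedure_id, procedure_state and procedure_error,
// so the combined schema is three columns plus whatever the inner event adds.
let event = ProcedureEvent::new(ProcedureId::random(), Box::new(NoopEvent), ProcedureState::Running);
assert_eq!(event.extra_schema().len(), 3);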
@@ -17,6 +17,7 @@
#![feature(assert_matches)]

pub mod error;
pub mod event;
pub mod local;
pub mod options;
mod procedure;
@@ -28,9 +29,11 @@ pub mod watcher;
pub mod test_util;

pub use crate::error::{Error, Result};
pub use crate::event::ProcedureEvent;
pub use crate::procedure::{
    BoxedProcedure, BoxedProcedureLoader, Context, ContextProvider, ContextProviderRef, LockKey,
    Output, ParseIdError, PoisonKey, PoisonKeys, Procedure, ProcedureId, ProcedureInfo,
    ProcedureManager, ProcedureManagerRef, ProcedureState, ProcedureWithId, Status, StringKey,
    UserMetadata,
};
pub use crate::watcher::Watcher;

@@ -23,6 +23,7 @@ use std::time::{Duration, Instant};
use async_trait::async_trait;
use backon::ExponentialBuilder;
use common_error::ext::BoxedError;
use common_event_recorder::EventRecorderRef;
use common_runtime::{RepeatedTask, TaskFunction};
use common_telemetry::tracing_context::{FutureExt, TracingContext};
use common_telemetry::{error, info, tracing};
@@ -36,6 +37,7 @@ use crate::error::{
    Result, StartRemoveOutdatedMetaTaskSnafu, StopRemoveOutdatedMetaTaskSnafu,
    TooManyRunningProceduresSnafu,
};
use crate::event::ProcedureEvent;
use crate::local::runner::Runner;
use crate::procedure::{BoxedProcedureLoader, InitProcedureState, PoisonKeys, ProcedureInfo};
use crate::rwlock::{KeyRwLock, OwnedKeyRwLockGuard};
@@ -43,7 +45,7 @@ use crate::store::poison_store::PoisonStoreRef;
use crate::store::{ProcedureMessage, ProcedureMessages, ProcedureStore, StateStoreRef};
use crate::{
    BoxedProcedure, ContextProvider, LockKey, PoisonKey, ProcedureId, ProcedureManager,
    ProcedureState, ProcedureWithId, StringKey, Watcher,
    ProcedureState, ProcedureWithId, StringKey, UserMetadata, Watcher,
};

/// The expired time of a procedure's metadata.
@@ -81,9 +83,14 @@ pub(crate) struct ProcedureMeta {
    start_time_ms: AtomicI64,
    /// End execution time of this procedure.
    end_time_ms: AtomicI64,
    /// Event recorder.
    event_recorder: Option<EventRecorderRef>,
    /// The user metadata of the procedure. It's generated by [Procedure::user_metadata].
    user_metadata: Option<UserMetadata>,
}

impl ProcedureMeta {
    #[allow(clippy::too_many_arguments)]
    fn new(
        id: ProcedureId,
        procedure_state: ProcedureState,
@@ -91,6 +98,8 @@ impl ProcedureMeta {
        lock_key: LockKey,
        poison_keys: PoisonKeys,
        type_name: &str,
        event_recorder: Option<EventRecorderRef>,
        user_metadata: Option<UserMetadata>,
    ) -> ProcedureMeta {
        let (state_sender, state_receiver) = watch::channel(procedure_state);
        ProcedureMeta {
@@ -105,6 +114,8 @@ impl ProcedureMeta {
            start_time_ms: AtomicI64::new(0),
            end_time_ms: AtomicI64::new(0),
            type_name: type_name.to_string(),
            event_recorder,
            user_metadata,
        }
    }

@@ -115,6 +126,15 @@ impl ProcedureMeta {

    /// Update current [ProcedureState].
    fn set_state(&self, state: ProcedureState) {
        // Emit the event to the event recorder if the user metadata contains the eventable object.
        if let (Some(event_recorder), Some(user_metadata)) =
            (&self.event_recorder, &self.user_metadata)
        {
            if let Some(event) = user_metadata.to_event() {
                event_recorder.record(Box::new(ProcedureEvent::new(self.id, event, state.clone())));
            }
        }

        // Safety: ProcedureMeta also holds the receiver, so `send()` should never fail.
        self.state_sender.send(state).unwrap();
    }
@@ -557,6 +577,7 @@ pub struct LocalManager {
    remove_outdated_meta_task: TokioMutex<Option<RepeatedTask<Error>>>,
    config: ManagerConfig,
    pause_aware: Option<PauseAwareRef>,
    event_recorder: Option<EventRecorderRef>,
}

impl LocalManager {
@@ -566,6 +587,7 @@ impl LocalManager {
        state_store: StateStoreRef,
        poison_store: PoisonStoreRef,
        pause_aware: Option<PauseAwareRef>,
        event_recorder: Option<EventRecorderRef>,
    ) -> LocalManager {
        let manager_ctx = Arc::new(ManagerContext::new(poison_store));

@@ -577,6 +599,7 @@ impl LocalManager {
            remove_outdated_meta_task: TokioMutex::new(None),
            config,
            pause_aware,
            event_recorder,
        }
    }

@@ -601,6 +624,7 @@ impl LocalManager {
    ) -> Result<Watcher> {
        ensure!(self.manager_ctx.running(), ManagerNotStartSnafu);

        let user_metadata = procedure.user_metadata();
        let meta = Arc::new(ProcedureMeta::new(
            procedure_id,
            procedure_state,
@@ -608,6 +632,8 @@ impl LocalManager {
            procedure.lock_key(),
            procedure.poison_keys(),
            procedure.type_name(),
            self.event_recorder.clone(),
            user_metadata.clone(),
        ));
        let runner = Runner {
            meta: meta.clone(),
@@ -619,8 +645,20 @@ impl LocalManager {
                .with_max_times(self.max_retry_times),
            store: self.procedure_store.clone(),
            rolling_back: false,
            event_recorder: self.event_recorder.clone(),
        };

        if let (Some(event_recorder), Some(event)) = (
            self.event_recorder.as_ref(),
            user_metadata.and_then(|m| m.to_event()),
        ) {
            event_recorder.record(Box::new(ProcedureEvent::new(
                procedure_id,
                event,
                ProcedureState::Running,
            )));
        }

        let watcher = meta.state_receiver.clone();

        ensure!(
@@ -870,6 +908,8 @@ pub(crate) mod test_util {
            LockKey::default(),
            PoisonKeys::default(),
            "ProcedureAdapter",
            None,
            None,
        )
    }

@@ -1019,7 +1059,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager, None, None);
        manager.manager_ctx.start();

        manager
@@ -1044,7 +1084,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager, None, None);
        manager.manager_ctx.start();

        manager
@@ -1098,7 +1138,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager, None, None);
        manager.manager_ctx.start();

        let procedure_id = ProcedureId::random();
@@ -1150,7 +1190,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager, None, None);
        manager.manager_ctx.start();

        #[derive(Debug)]
@@ -1231,7 +1271,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager, None, None);

        let mut procedure = ProcedureToLoad::new("submit");
        procedure.lock_key = LockKey::single_exclusive("test.submit");
@@ -1259,7 +1299,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager, None, None);

        manager.start().await.unwrap();
        manager.stop().await.unwrap();
@@ -1296,7 +1336,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager, None, None);
        manager.manager_ctx.set_running();

        let mut procedure = ProcedureToLoad::new("submit");
@@ -1378,7 +1418,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager, None, None);
        manager.manager_ctx.set_running();

        manager
@@ -1503,7 +1543,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager, None, None);
        manager.manager_ctx.start();

        let notify = Arc::new(Notify::new());
@@ -17,6 +17,7 @@ use std::sync::Arc;
use std::time::Duration;

use backon::{BackoffBuilder, ExponentialBuilder};
use common_event_recorder::EventRecorderRef;
use common_telemetry::{debug, error, info};
use rand::Rng;
use snafu::ResultExt;
@@ -96,6 +97,7 @@ pub(crate) struct Runner {
    pub(crate) exponential_builder: ExponentialBuilder,
    pub(crate) store: Arc<ProcedureStore>,
    pub(crate) rolling_back: bool,
    pub(crate) event_recorder: Option<EventRecorderRef>,
}

impl Runner {
@@ -425,6 +427,8 @@ impl Runner {
            procedure.lock_key(),
            procedure.poison_keys(),
            procedure.type_name(),
            self.event_recorder.clone(),
            procedure.user_metadata(),
        ));
        let runner = Runner {
            meta: meta.clone(),
@@ -434,6 +438,7 @@ impl Runner {
            exponential_builder: self.exponential_builder,
            store: self.store.clone(),
            rolling_back: false,
            event_recorder: self.event_recorder.clone(),
        };

        // Insert the procedure. We already check the procedure existence before inserting
@@ -627,6 +632,7 @@ mod tests {
            exponential_builder: ExponentialBuilder::default(),
            store,
            rolling_back: false,
            event_recorder: None,
        }
    }

@@ -19,6 +19,7 @@ use std::str::FromStr;
use std::sync::Arc;

use async_trait::async_trait;
use common_event_recorder::{Event, Eventable};
use serde::{Deserialize, Serialize};
use smallvec::{smallvec, SmallVec};
use snafu::{ResultExt, Snafu};
@@ -214,6 +215,29 @@ pub trait Procedure: Send {
    fn poison_keys(&self) -> PoisonKeys {
        PoisonKeys::default()
    }

    /// Returns the user metadata of the procedure. If the metadata contains the eventable object,
    /// you can use [UserMetadata::to_event] to get the event and emit it to the event recorder.
    fn user_metadata(&self) -> Option<UserMetadata> {
        None
    }
}

/// The user metadata injected by the procedure caller. It can be used to emit events to the event recorder.
#[derive(Clone, Debug)]
pub struct UserMetadata {
    event_object: Arc<dyn Eventable>,
}

impl UserMetadata {
    /// Creates a new [UserMetadata] with the given event object.
    pub fn new(event_object: Arc<dyn Eventable>) -> Self {
        Self { event_object }
    }

    /// Returns the event of the procedure. It can be None if the procedure does not emit any event.
    pub fn to_event(&self) -> Option<Box<dyn Event>> {
        self.event_object.to_event()
    }
}
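Wiring this up is a two-step affair: implement Eventable on some carrier type and hand it to the framework from user_metadata(). A hedged sketch under stated assumptions (the carrier name is hypothetical, and Eventable is assumed to expose only the to_event() used above):

// Hypothetical carrier; to_event() may return None to stay silent.
#[derive(Debug)]
struct MigrationEvents;

impl Eventable for MigrationEvents {
    fn to_event(&self) -> Option<Box<dyn Event>> {
        None // or Some(Box::new(...)) to emit an event on every state change
    }
}

// Inside an `impl Procedure for ...` block:
fn user_metadata(&self) -> Option<UserMetadata> {
    Some(UserMetadata::new(Arc::new(MigrationEvents)))
}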

#[async_trait]
@@ -83,7 +83,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::default());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager, None, None);
        manager.start().await.unwrap();

        #[derive(Debug)]
@@ -19,7 +19,8 @@ use datafusion::execution::registry::SerializerRegistry;
use datafusion_common::DataFusionError;
use datafusion_expr::UserDefinedLogicalNode;
use promql::extension_plan::{
    EmptyMetric, InstantManipulate, RangeManipulate, ScalarCalculate, SeriesDivide, SeriesNormalize,
    Absent, EmptyMetric, InstantManipulate, RangeManipulate, ScalarCalculate, SeriesDivide,
    SeriesNormalize,
};

#[derive(Debug)]
@@ -65,6 +66,13 @@ impl SerializerRegistry for ExtensionSerializer {
                    .expect("Failed to downcast to SeriesDivide");
                Ok(series_divide.serialize())
            }
            name if name == Absent::name() => {
                let absent = node
                    .as_any()
                    .downcast_ref::<Absent>()
                    .expect("Failed to downcast to Absent");
                Ok(absent.serialize())
            }
            name if name == EmptyMetric::name() => Err(DataFusionError::Substrait(
                "EmptyMetric should not be serialized".to_string(),
            )),
@@ -103,6 +111,10 @@ impl SerializerRegistry for ExtensionSerializer {
                let scalar_calculate = ScalarCalculate::deserialize(bytes)?;
                Ok(Arc::new(scalar_calculate))
            }
            name if name == Absent::name() => {
                let absent = Absent::deserialize(bytes)?;
                Ok(Arc::new(absent))
            }
            name if name == EmptyMetric::name() => Err(DataFusionError::Substrait(
                "EmptyMetric should not be deserialized".to_string(),
            )),

@@ -558,7 +558,7 @@ impl fmt::Debug for Timestamp {
    }
}

#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum TimeUnit {
    Second,
    #[default]
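The added Hash derive is what lets TimeUnit key hash-based collections. A small illustrative check:

use std::collections::HashSet;

let mut units = HashSet::new();
units.insert(TimeUnit::Second);
units.insert(TimeUnit::Second); // Hash + Eq deduplicate repeated inserts
assert_eq!(units.len(), 1);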
@@ -26,6 +26,7 @@ common-greptimedb-telemetry.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-options.workspace = true
common-procedure.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true

@@ -16,6 +16,7 @@

use common_base::readable_size::ReadableSize;
use common_config::{Configurable, DEFAULT_DATA_HOME};
use common_options::memory::MemoryOptions;
pub use common_procedure::options::ProcedureConfig;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_wal::config::DatanodeWalConfig;
@@ -85,6 +86,7 @@ pub struct DatanodeOptions {
    pub export_metrics: ExportMetricsOption,
    pub tracing: TracingOptions,
    pub query: QueryOptions,
    pub memory: MemoryOptions,

    /// Deprecated options, please use the new options instead.
    #[deprecated(note = "Please use `grpc.addr` instead.")]
@@ -131,6 +133,7 @@ impl Default for DatanodeOptions {
            export_metrics: ExportMetricsOption::default(),
            tracing: TracingOptions::default(),
            query: QueryOptions::default(),
            memory: MemoryOptions::default(),

            // Deprecated options
            rpc_addr: None,

@@ -1462,7 +1462,10 @@ mod tests {
        );

        let err = mock_region_server
            .handle_request(region_id, RegionRequest::Truncate(RegionTruncateRequest {}))
            .handle_request(
                region_id,
                RegionRequest::Truncate(RegionTruncateRequest::All),
            )
            .await
            .unwrap_err();

@@ -28,6 +28,7 @@ common-function.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-options.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-runtime.workspace = true
@@ -77,7 +78,7 @@ tonic.workspace = true
[dev-dependencies]
catalog.workspace = true
common-catalog.workspace = true
pretty_assertions = "1.4.0"
pretty_assertions.workspace = true
prost.workspace = true
query.workspace = true
serde_json = "1.0"

@@ -24,6 +24,7 @@ use api::v1::{RowDeleteRequest, RowDeleteRequests, RowInsertRequest, RowInsertRe
use common_config::Configurable;
use common_error::ext::BoxedError;
use common_meta::key::TableMetadataManagerRef;
use common_options::memory::MemoryOptions;
use common_runtime::JoinHandle;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_telemetry::{debug, info, trace};
@@ -111,6 +112,7 @@ pub struct FlownodeOptions {
    pub heartbeat: HeartbeatOptions,
    pub query: QueryOptions,
    pub user_provider: Option<String>,
    pub memory: MemoryOptions,
}

impl Default for FlownodeOptions {
@@ -131,6 +133,7 @@ impl Default for FlownodeOptions {
                allow_query_fallback: false,
            },
            user_provider: None,
            memory: MemoryOptions::default(),
        }
    }
}

@@ -24,11 +24,11 @@ use catalog::CatalogManagerRef;
use common_base::Plugins;
use common_error::ext::BoxedError;
use common_meta::cache::{LayeredCacheRegistryRef, TableFlownodeSetCacheRef, TableRouteCacheRef};
use common_meta::ddl::ProcedureExecutorRef;
use common_meta::key::flow::FlowMetadataManagerRef;
use common_meta::key::TableMetadataManagerRef;
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::{Flownode, NodeManagerRef};
use common_meta::procedure_executor::ProcedureExecutorRef;
use common_query::Output;
use common_runtime::JoinHandle;
use common_telemetry::tracing::info;

@@ -17,6 +17,7 @@ use std::sync::Arc;

use common_base::readable_size::ReadableSize;
use common_config::config::Configurable;
use common_options::datanode::DatanodeClientOptions;
use common_options::memory::MemoryOptions;
use common_telemetry::logging::{LoggingOptions, SlowQueryOptions, TracingOptions};
use meta_client::MetaClientOptions;
use query::options::QueryOptions;
@@ -62,6 +63,7 @@ pub struct FrontendOptions {
    pub query: QueryOptions,
    pub max_in_flight_write_bytes: Option<ReadableSize>,
    pub slow_query: Option<SlowQueryOptions>,
    pub memory: MemoryOptions,
}

impl Default for FrontendOptions {
@@ -88,6 +90,7 @@ impl Default for FrontendOptions {
            query: QueryOptions::default(),
            max_in_flight_write_bytes: None,
            slow_query: Some(SlowQueryOptions::default()),
            memory: MemoryOptions::default(),
        }
    }
}

@@ -32,7 +32,7 @@ use std::time::{Duration, SystemTime};
use async_stream::stream;
use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use catalog::process_manager::ProcessManagerRef;
use catalog::process_manager::{ProcessManagerRef, QueryStatement as CatalogQueryStatement};
use catalog::CatalogManagerRef;
use client::OutputData;
use common_base::cancellation::CancellableFuture;
@@ -40,12 +40,12 @@ use common_base::Plugins;
use common_config::KvBackendConfig;
use common_error::ext::{BoxedError, ErrorExt};
use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::ddl::ProcedureExecutorRef;
use common_meta::key::runtime_switch::RuntimeSwitchManager;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::TableMetadataManagerRef;
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::NodeManagerRef;
use common_meta::procedure_executor::ProcedureExecutorRef;
use common_meta::state_store::KvStateStore;
use common_procedure::local::{LocalManager, ManagerConfig};
use common_procedure::options::ProcedureConfig;
@@ -73,7 +73,7 @@ use query::query_engine::DescribeResult;
use query::QueryEngineRef;
use servers::error::{
    self as server_error, AuthSnafu, CommonMetaSnafu, ExecuteQuerySnafu,
    OtlpMetricModeIncompatibleSnafu, ParsePromQLSnafu,
    OtlpMetricModeIncompatibleSnafu, ParsePromQLSnafu, UnexpectedResultSnafu,
};
use servers::interceptor::{
    PromQueryInterceptor, PromQueryInterceptorRef, SqlQueryInterceptor, SqlQueryInterceptorRef,
@@ -159,6 +159,7 @@ impl Instance {
            kv_state_store.clone(),
            kv_state_store,
            Some(runtime_switch_manager),
            None,
        ));

        Ok((kv_backend, procedure_manager))
@@ -220,36 +221,40 @@ impl Instance {
        let query_interceptor = self.plugins.get::<SqlQueryInterceptorRef<Error>>();
        let query_interceptor = query_interceptor.as_ref();

        let _slow_query_timer = if let Some(recorder) = &self.slow_query_recorder {
            recorder.start(QueryStatement::Sql(stmt.clone()), query_ctx.clone())
        if should_capture_statement(Some(&stmt)) {
            let slow_query_timer = self.slow_query_recorder.as_ref().and_then(|recorder| {
                recorder.start(CatalogQueryStatement::Sql(stmt.clone()), query_ctx.clone())
            });

            let ticket = self.process_manager.register_query(
                query_ctx.current_catalog().to_string(),
                vec![query_ctx.current_schema()],
                stmt.to_string(),
                query_ctx.conn_info().to_string(),
                Some(query_ctx.process_id()),
                slow_query_timer,
            );

            let query_fut = self.exec_statement_with_timeout(stmt, query_ctx, query_interceptor);

            CancellableFuture::new(query_fut, ticket.cancellation_handle.clone())
                .await
                .map_err(|_| error::CancelledSnafu.build())?
                .map(|output| {
                    let Output { meta, data } = output;

                    let data = match data {
                        OutputData::Stream(stream) => OutputData::Stream(Box::pin(
                            CancellableStreamWrapper::new(stream, ticket),
                        )),
                        other => other,
                    };
                    Output { data, meta }
                })
        } else {
            None
        };

        let ticket = self.process_manager.register_query(
            query_ctx.current_catalog().to_string(),
            vec![query_ctx.current_schema()],
            stmt.to_string(),
            query_ctx.conn_info().to_string(),
            Some(query_ctx.process_id()),
        );

        let query_fut = self.exec_statement_with_timeout(stmt, query_ctx, query_interceptor);

        CancellableFuture::new(query_fut, ticket.cancellation_handle.clone())
            .await
            .map_err(|_| error::CancelledSnafu.build())?
            .map(|output| {
                let Output { meta, data } = output;

                let data = match data {
                    OutputData::Stream(stream) => {
                        OutputData::Stream(Box::pin(CancellableStreamWrapper::new(stream, ticket)))
                    }
                    other => other,
                };
                Output { data, meta }
            })
            self.exec_statement_with_timeout(stmt, query_ctx, query_interceptor)
                .await
        }
    }

    async fn exec_statement_with_timeout(
@@ -571,13 +576,54 @@ impl SqlQueryHandler for Instance {
        }
    }

    async fn do_exec_plan(&self, plan: LogicalPlan, query_ctx: QueryContextRef) -> Result<Output> {
        // The plan should be prepared before exec; we'll do the check there.
        self.query_engine
            .execute(plan.clone(), query_ctx)
            .await
            .context(ExecLogicalPlanSnafu)
    async fn do_exec_plan(
        &self,
        stmt: Option<Statement>,
        plan: LogicalPlan,
        query_ctx: QueryContextRef,
    ) -> Result<Output> {
        if should_capture_statement(stmt.as_ref()) {
            // It's safe to unwrap here because we've already checked the type.
            let stmt = stmt.unwrap();
            let query = stmt.to_string();
            let slow_query_timer = self.slow_query_recorder.as_ref().and_then(|recorder| {
                recorder.start(CatalogQueryStatement::Sql(stmt), query_ctx.clone())
            });

            let ticket = self.process_manager.register_query(
                query_ctx.current_catalog().to_string(),
                vec![query_ctx.current_schema()],
                query,
                query_ctx.conn_info().to_string(),
                Some(query_ctx.process_id()),
                slow_query_timer,
            );

            let query_fut = self.query_engine.execute(plan.clone(), query_ctx);

            CancellableFuture::new(query_fut, ticket.cancellation_handle.clone())
                .await
                .map_err(|_| error::CancelledSnafu.build())?
                .map(|output| {
                    let Output { meta, data } = output;

                    let data = match data {
                        OutputData::Stream(stream) => OutputData::Stream(Box::pin(
                            CancellableStreamWrapper::new(stream, ticket),
                        )),
                        other => other,
                    };
                    Output { data, meta }
                })
                .context(ExecLogicalPlanSnafu)
        } else {
            // The plan should be prepared before exec; we'll do the check there.
            self.query_engine
                .execute(plan.clone(), query_ctx)
                .await
                .context(ExecLogicalPlanSnafu)
        }
    }

    #[tracing::instrument(skip_all)]
@@ -671,12 +717,6 @@ impl PrometheusHandler for Instance {
            }
        })?;

        let _slow_query_timer = if let Some(recorder) = &self.slow_query_recorder {
            recorder.start(stmt.clone(), query_ctx.clone())
        } else {
            None
        };

        let plan = self
            .statement_executor
            .plan(&stmt, query_ctx.clone())
@@ -686,10 +726,47 @@ impl PrometheusHandler for Instance {

        interceptor.pre_execute(query, Some(&plan), query_ctx.clone())?;

        let output = self
            .statement_executor
            .exec_plan(plan, query_ctx.clone())
        // Take the EvalStmt from the original QueryStatement and use it to create the CatalogQueryStatement.
        let query_statement = if let QueryStatement::Promql(eval_stmt) = stmt {
            CatalogQueryStatement::Promql(eval_stmt)
        } else {
            // It should not happen since the query is already parsed successfully.
            return UnexpectedResultSnafu {
                reason: "The query should always be promql.".to_string(),
            }
            .fail();
        };
        let query = query_statement.to_string();

        let slow_query_timer = self
            .slow_query_recorder
            .as_ref()
            .and_then(|recorder| recorder.start(query_statement, query_ctx.clone()));

        let ticket = self.process_manager.register_query(
            query_ctx.current_catalog().to_string(),
            vec![query_ctx.current_schema()],
            query,
            query_ctx.conn_info().to_string(),
            Some(query_ctx.process_id()),
            slow_query_timer,
        );

        let query_fut = self.statement_executor.exec_plan(plan, query_ctx.clone());

        let output = CancellableFuture::new(query_fut, ticket.cancellation_handle.clone())
            .await
            .map_err(|_| servers::error::CancelledSnafu.build())?
            .map(|output| {
                let Output { meta, data } = output;
                let data = match data {
                    OutputData::Stream(stream) => {
                        OutputData::Stream(Box::pin(CancellableStreamWrapper::new(stream, ticket)))
                    }
                    other => other,
                };
                Output { data, meta }
            })
            .map_err(BoxedError::new)
            .context(ExecuteQuerySnafu)?;

@@ -913,6 +990,15 @@ fn validate_database(name: &ObjectName, query_ctx: &QueryContextRef) -> Result<(
        .context(SqlExecInterceptedSnafu)
}

// Returns true if a query ticket and slow query timer should be created,
// i.e. the statement is a query or a readonly statement.
fn should_capture_statement(stmt: Option<&Statement>) -> bool {
    if let Some(stmt) = stmt {
        matches!(stmt, Statement::Query(_)) || stmt.is_readonly()
    } else {
        false
    }
}

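The helper gates the new bookkeeping: only plain queries and readonly statements pay for a process ticket and slow-query timer. A self-contained analogue of the same pattern, using a hypothetical stand-in statement type (the real code matches on the SQL Statement enum):

// Illustrative stand-in, not the real Statement type.
enum Stmt {
    Query,
    Insert,
    ShowTables,
}

impl Stmt {
    fn is_readonly(&self) -> bool {
        matches!(self, Stmt::ShowTables)
    }
}

fn should_capture(stmt: Option<&Stmt>) -> bool {
    stmt.map(|s| matches!(s, Stmt::Query) || s.is_readonly())
        .unwrap_or(false)
}

assert!(should_capture(Some(&Stmt::Query)));
assert!(should_capture(Some(&Stmt::ShowTables)));
assert!(!should_capture(Some(&Stmt::Insert)));
assert!(!should_capture(None));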
#[cfg(test)]
mod tests {
    use std::collections::HashMap;

@@ -20,11 +20,11 @@ use catalog::CatalogManagerRef;
use common_base::Plugins;
use common_meta::cache::{LayeredCacheRegistryRef, TableRouteCacheRef};
use common_meta::cache_invalidator::{CacheInvalidatorRef, DummyCacheInvalidator};
use common_meta::ddl::ProcedureExecutorRef;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::NodeManagerRef;
use common_meta::procedure_executor::ProcedureExecutorRef;
use dashmap::DashMap;
use operator::delete::Deleter;
use operator::flow::FlowServiceOperator;
@@ -158,7 +158,8 @@ impl FrontendBuilder {
            self.catalog_manager.clone(),
        ));

        let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
        let flow_metadata_manager: Arc<FlowMetadataManager> =
            Arc::new(FlowMetadataManager::new(kv_backend.clone()));
        let flow_service = FlowServiceOperator::new(flow_metadata_manager, node_manager.clone());

        let query_engine = QueryEngineFactory::new_with_plugins(
Some files were not shown because too many files have changed in this diff.