Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-03 20:02:54 +00:00.

Compare commits: 10 commits, v0.10.2 ... v0.11.0-ni
| Author | SHA1 | Date |
|---|---|---|
| | c049ce6ab1 | |
| | 6308e86e21 | |
| | 36263830bb | |
| | d931389a4c | |
| | 8bdef776b3 | |
| | 91e933517a | |
| | a617e0dbef | |
| | 6130c70b63 | |
| | fae141ad0a | |
| | 57f31d14c8 | |
@@ -7,6 +7,8 @@
* [NiwakaDev](https://github.com/NiwakaDev)
* [etolbakov](https://github.com/etolbakov)
* [irenjj](https://github.com/irenjj)
* [tisonkun](https://github.com/tisonkun)
* [Lanqing Yang](https://github.com/lyang24)

## Team Members (in alphabetical order)

@@ -30,7 +32,6 @@
* [shuiyisong](https://github.com/shuiyisong)
* [sunchanglong](https://github.com/sunchanglong)
* [sunng87](https://github.com/sunng87)
* [tisonkun](https://github.com/tisonkun)
* [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia)
* [xtang](https://github.com/xtang)
Cargo.lock (generated, 164 changes)

@@ -74,26 +74,6 @@ dependencies = [
"memchr",
]

[[package]]
name = "aide"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "befdff0b4683a0824fc8719ce639a252d9d62cd89c8d0004c39e2417128c1eb8"
dependencies = [
"axum",
"bytes",
"cfg-if",
"http 0.2.12",
"indexmap 1.9.3",
"schemars",
"serde",
"serde_json",
"thiserror",
"tower-layer",
"tower-service",
"tracing",
]

[[package]]
name = "alloc-no-stdlib"
version = "2.0.4"
@@ -208,7 +188,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"

[[package]]
name = "api"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"common-base",
"common-decimal",
@@ -769,7 +749,7 @@ dependencies = [

[[package]]
name = "auth"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"async-trait",
@@ -1379,7 +1359,7 @@ dependencies = [

[[package]]
name = "cache"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"catalog",
"common-error",
@@ -1387,7 +1367,7 @@ dependencies = [
"common-meta",
"moka",
"snafu 0.8.5",
"substrait 0.10.1",
"substrait 0.11.0",
]

[[package]]
@@ -1414,7 +1394,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

[[package]]
name = "catalog"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"arrow",
@@ -1753,7 +1733,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"

[[package]]
name = "client"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"arc-swap",
@@ -1782,7 +1762,7 @@ dependencies = [
"rand",
"serde_json",
"snafu 0.8.5",
"substrait 0.10.1",
"substrait 0.11.0",
"substrait 0.37.3",
"tokio",
"tokio-stream",
@@ -1823,7 +1803,7 @@ dependencies = [

[[package]]
name = "cmd"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"async-trait",
"auth",
@@ -1882,7 +1862,7 @@ dependencies = [
"similar-asserts",
"snafu 0.8.5",
"store-api",
"substrait 0.10.1",
"substrait 0.11.0",
"table",
"temp-env",
"tempfile",
@@ -1928,7 +1908,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"

[[package]]
name = "common-base"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"anymap2",
"async-trait",
@@ -1949,7 +1929,7 @@ dependencies = [

[[package]]
name = "common-catalog"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"chrono",
"common-error",
@@ -1960,7 +1940,7 @@ dependencies = [

[[package]]
name = "common-config"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"common-base",
"common-error",
@@ -1983,7 +1963,7 @@ dependencies = [

[[package]]
name = "common-datasource"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"arrow",
"arrow-schema",
@@ -2020,7 +2000,7 @@ dependencies = [

[[package]]
name = "common-decimal"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"bigdecimal 0.4.5",
"common-error",
@@ -2033,7 +2013,7 @@ dependencies = [

[[package]]
name = "common-error"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"snafu 0.8.5",
"strum 0.25.0",
@@ -2042,7 +2022,7 @@ dependencies = [

[[package]]
name = "common-frontend"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"async-trait",
@@ -2057,7 +2037,7 @@ dependencies = [

[[package]]
name = "common-function"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"approx 0.5.1",
@@ -2102,7 +2082,7 @@ dependencies = [

[[package]]
name = "common-greptimedb-telemetry"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"async-trait",
"common-runtime",
@@ -2119,7 +2099,7 @@ dependencies = [

[[package]]
name = "common-grpc"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"arrow-flight",
@@ -2145,7 +2125,7 @@ dependencies = [

[[package]]
name = "common-grpc-expr"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"common-base",
@@ -2164,7 +2144,7 @@ dependencies = [

[[package]]
name = "common-macro"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"arc-swap",
"common-query",
@@ -2178,7 +2158,7 @@ dependencies = [

[[package]]
name = "common-mem-prof"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"common-error",
"common-macro",
@@ -2191,7 +2171,7 @@ dependencies = [

[[package]]
name = "common-meta"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"anymap2",
"api",
@@ -2248,7 +2228,7 @@ dependencies = [

[[package]]
name = "common-options"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"common-grpc",
"humantime-serde",
@@ -2257,11 +2237,11 @@ dependencies = [

[[package]]
name = "common-plugins"
version = "0.10.1"
version = "0.11.0"

[[package]]
name = "common-pprof"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"common-error",
"common-macro",
@@ -2273,7 +2253,7 @@ dependencies = [

[[package]]
name = "common-procedure"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"async-stream",
"async-trait",
@@ -2300,7 +2280,7 @@ dependencies = [

[[package]]
name = "common-procedure-test"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"async-trait",
"common-procedure",
@@ -2308,7 +2288,7 @@ dependencies = [

[[package]]
name = "common-query"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"async-trait",
@@ -2334,7 +2314,7 @@ dependencies = [

[[package]]
name = "common-recordbatch"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"arc-swap",
"common-error",
@@ -2353,7 +2333,7 @@ dependencies = [

[[package]]
name = "common-runtime"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"async-trait",
"clap 4.5.19",
@@ -2383,7 +2363,7 @@ dependencies = [

[[package]]
name = "common-telemetry"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"atty",
"backtrace",
@@ -2411,7 +2391,7 @@ dependencies = [

[[package]]
name = "common-test-util"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"client",
"common-query",
@@ -2423,7 +2403,7 @@ dependencies = [

[[package]]
name = "common-time"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"arrow",
"chrono",
@@ -2439,18 +2419,17 @@ dependencies = [

[[package]]
name = "common-version"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"build-data",
"const_format",
"schemars",
"serde",
"shadow-rs",
]

[[package]]
name = "common-wal"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"common-base",
"common-error",
@@ -3259,7 +3238,7 @@ dependencies = [

[[package]]
name = "datanode"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"arrow-flight",
@@ -3309,7 +3288,7 @@ dependencies = [
"session",
"snafu 0.8.5",
"store-api",
"substrait 0.10.1",
"substrait 0.11.0",
"table",
"tokio",
"toml 0.8.19",
@@ -3318,7 +3297,7 @@ dependencies = [

[[package]]
name = "datatypes"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"arrow",
"arrow-array",
@@ -3936,7 +3915,7 @@ dependencies = [

[[package]]
name = "file-engine"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"async-trait",
@@ -4053,7 +4032,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"

[[package]]
name = "flow"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"arrow",
@@ -4110,7 +4089,7 @@ dependencies = [
"snafu 0.8.5",
"store-api",
"strum 0.25.0",
"substrait 0.10.1",
"substrait 0.11.0",
"table",
"tokio",
"tonic 0.11.0",
@@ -4172,7 +4151,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"

[[package]]
name = "frontend"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"arc-swap",
@@ -5312,7 +5291,7 @@ dependencies = [

[[package]]
name = "index"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"async-trait",
"asynchronous-codec",
@@ -6156,7 +6135,7 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"

[[package]]
name = "log-store"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"async-stream",
"async-trait",
@@ -6486,7 +6465,7 @@ dependencies = [

[[package]]
name = "meta-client"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"async-trait",
@@ -6513,7 +6492,7 @@ dependencies = [

[[package]]
name = "meta-srv"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"async-trait",
@@ -6592,7 +6571,7 @@ dependencies = [

[[package]]
name = "metric-engine"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"aquamarine",
@@ -6695,7 +6674,7 @@ dependencies = [

[[package]]
name = "mito2"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"aquamarine",
@@ -7459,7 +7438,7 @@ dependencies = [

[[package]]
name = "object-store"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"anyhow",
"bytes",
@@ -7750,7 +7729,7 @@ dependencies = [

[[package]]
name = "operator"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"async-stream",
@@ -7797,7 +7776,7 @@ dependencies = [
"sql",
"sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
"store-api",
"substrait 0.10.1",
"substrait 0.11.0",
"table",
"tokio",
"tokio-util",
@@ -8047,7 +8026,7 @@ dependencies = [

[[package]]
name = "partition"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"async-trait",
@@ -8348,7 +8327,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"

[[package]]
name = "pipeline"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"ahash 0.8.11",
"api",
@@ -8511,7 +8490,7 @@ dependencies = [

[[package]]
name = "plugins"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"auth",
"common-base",
@@ -8785,7 +8764,7 @@ dependencies = [

[[package]]
name = "promql"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"ahash 0.8.11",
"async-trait",
@@ -9023,7 +9002,7 @@ dependencies = [

[[package]]
name = "puffin"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"async-compression 0.4.13",
"async-trait",
@@ -9147,7 +9126,7 @@ dependencies = [

[[package]]
name = "query"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"ahash 0.8.11",
"api",
@@ -9214,7 +9193,7 @@ dependencies = [
"stats-cli",
"store-api",
"streaming-stats",
"substrait 0.10.1",
"substrait 0.11.0",
"table",
"tokio",
"tokio-stream",
@@ -10651,7 +10630,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92"
dependencies = [
"dyn-clone",
"indexmap 1.9.3",
"schemars_derive",
"serde",
"serde_json",
@@ -10677,7 +10655,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

[[package]]
name = "script"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"arc-swap",
@@ -10971,10 +10949,9 @@ dependencies = [

[[package]]
name = "servers"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"ahash 0.8.11",
"aide",
"api",
"arrow",
"arrow-flight",
@@ -11056,7 +11033,6 @@ dependencies = [
"rustls 0.23.13",
"rustls-pemfile 2.2.0",
"rustls-pki-types",
"schemars",
"script",
"serde",
"serde_json",
@@ -11086,7 +11062,7 @@ dependencies = [

[[package]]
name = "session"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"arc-swap",
@@ -11432,7 +11408,7 @@ dependencies = [

[[package]]
name = "sql"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"chrono",
@@ -11495,7 +11471,7 @@ dependencies = [

[[package]]
name = "sqlness-runner"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"async-trait",
"clap 4.5.19",
@@ -11715,7 +11691,7 @@ dependencies = [

[[package]]
name = "store-api"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"aquamarine",
@@ -11886,7 +11862,7 @@ dependencies = [

[[package]]
name = "substrait"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"async-trait",
"bytes",
@@ -12085,7 +12061,7 @@ dependencies = [

[[package]]
name = "table"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"async-trait",
@@ -12351,7 +12327,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"

[[package]]
name = "tests-fuzz"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"arbitrary",
"async-trait",
@@ -12393,7 +12369,7 @@ dependencies = [

[[package]]
name = "tests-integration"
version = "0.10.1"
version = "0.11.0"
dependencies = [
"api",
"arrow-flight",
@@ -12457,7 +12433,7 @@ dependencies = [
"sql",
"sqlx",
"store-api",
"substrait 0.10.1",
"substrait 0.11.0",
"table",
"tempfile",
"time",
@@ -66,7 +66,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.10.1"
version = "0.11.0"
edition = "2021"
license = "Apache-2.0"

@@ -167,7 +167,6 @@ rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33"
rustc-hash = "2.0"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
README.md (62 changes)
@@ -56,7 +56,7 @@
- [Project Status](#project-status)
- [Join the community](#community)
- [Contributing](#contributing)
- [Extension](#extension )
- [Tools & Extensions](#tools--extensions)
- [License](#license)
- [Acknowledgement](#acknowledgement)

@@ -66,31 +66,33 @@

## Why GreptimeDB

Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:
Our core developers have been building time-series data platforms for years. Based on our best practices, GreptimeDB was born to give you:

* **Unified all kinds of time series**
* **Unified Processing of Metrics, Logs, and Events**

GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics, logs, and events. It supports analyzing metrics, logs, and events with SQL and PromQL, and doing streaming with continuous aggregation.
GreptimeDB unifies time series data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/continuous-aggregation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).

* **Cloud-Edge collaboration**
* **Cloud-native Distributed Database**

GreptimeDB can be deployed on ARM architecture-compatible Android/Linux systems as well as cloud environments from various vendors. Both sides run the same software, providing identical APIs and control planes, so your application can run at the edge or on the cloud without modification, and data synchronization also becomes extremely easy and efficient.

* **Cloud-native distributed database**

By leveraging object storage (S3 and others), separating compute and storage, scaling stateless compute nodes arbitrarily, GreptimeDB implements seamless scalability. It also supports cross-cloud deployment with a built-in unified data access layer over different object storages.
Built for [Kubernetes](https://docs.greptime.com/user-guide/deployments/deploy-on-kubernetes/greptimedb-operator-management). GreptimeDB achieves seamless scalability with its [cloud-native architecture](https://docs.greptime.com/user-guide/concepts/architecture) of separated compute and storage, built on object storage (AWS S3, Azure Blob Storage, etc.) while enabling cross-cloud deployment through a unified data access layer.

* **Performance and Cost-effective**

Flexible indexing capabilities and distributed, parallel-processing query engine, tackling high cardinality issues down. Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
Written in pure Rust for superior performance and reliability. GreptimeDB features a distributed query engine with intelligent indexing to handle high cardinality data efficiently. Its optimized columnar storage achieves 50x cost efficiency on cloud object storage through advanced compression. [Benchmark reports](https://www.greptime.com/blogs/2024-09-09-report-summary).

* **Compatible with InfluxDB, Prometheus and more protocols**
* **Cloud-Edge Collaboration**

Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/protocols/overview).
GreptimeDB seamlessly operates across cloud and edge (ARM/Android/Linux), providing consistent APIs and control plane for unified data management and efficient synchronization. [Learn how to run on Android](https://docs.greptime.com/user-guide/deployments/run-on-android/).

* **Multi-protocol Ingestion, SQL & PromQL Ready**

Widely adopted database protocols and APIs, including MySQL, PostgreSQL, InfluxDB, OpenTelemetry, Loki and Prometheus, etc. Effortless Adoption & Seamless Migration. [Supported Protocols Overview](https://docs.greptime.com/user-guide/protocols/overview).

For more detailed info please read [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb).

## Try GreptimeDB

### 1. [GreptimePlay](https://greptime.com/playground)
### 1. [Live Demo](https://greptime.com/playground)

Try out the features of GreptimeDB right from your browser.

@@ -109,9 +111,18 @@ docker pull greptime/greptimedb
Start a GreptimeDB container with:

```shell
docker run --rm --name greptime --net=host greptime/greptimedb standalone start
docker run -p 127.0.0.1:4000-4003:4000-4003 \
-v "$(pwd)/greptimedb:/tmp/greptimedb" \
--name greptime --rm \
greptime/greptimedb:latest standalone start \
--http-addr 0.0.0.0:4000 \
--rpc-addr 0.0.0.0:4001 \
--mysql-addr 0.0.0.0:4002 \
--postgres-addr 0.0.0.0:4003
```

Access the dashboard via `http://localhost:4000/dashboard`.

Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
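Besides the dashboard on port 4000, the command above also maps the MySQL endpoint (4002) and PostgreSQL endpoint (4003) to localhost, so for example a stock MySQL client can connect straight away:

```shell
# Connect to the container started above over the MySQL protocol (port 4002).
mysql -h 127.0.0.1 -P 4002
```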
## Getting Started

@@ -141,7 +152,7 @@ Run a standalone server:
cargo run -- standalone start
```

## Extension
## Tools & Extensions

### Dashboard

@@ -158,14 +169,19 @@ cargo run -- standalone start

### Grafana Dashboard

Our official Grafana dashboard is available at [grafana](grafana/README.md) directory.
Our official Grafana dashboard for monitoring GreptimeDB is available at [grafana](grafana/README.md) directory.

## Project Status

The current version has not yet reached the standards for General Availability.
According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412)
GreptimeDB is currently in Beta. We are targeting GA (General Availability) with v1.0 release by Early 2025.

We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.
While in Beta, GreptimeDB is already:

* Being used in production by early adopters
* Actively maintained with regular releases, [about version number](https://docs.greptime.com/nightly/reference/about-greptimedb-version)
* Suitable for testing and evaluation

For production use, we recommend using the latest stable release.

## Community

@@ -184,12 +200,12 @@ In addition, you may:
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)

## Commerial Support
## Commercial Support

If you are running GreptimeDB OSS in your organization, we offer additional
enterprise addons, installation service, training and consulting. [Contact
enterprise add-ons, installation services, training, and consulting. [Contact
us](https://greptime.com/contactus) and we will reach out to you with more
detail of our commerial license.
detail of our commercial license.

## License
@@ -109,6 +109,11 @@
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -432,6 +437,11 @@
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -375,6 +375,23 @@ endpoint = "https://s3.amazonaws.com"
## @toml2docs:none-default
region = "us-west-2"

## The http client options to the storage.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
[storage.http_client]

## The maximum idle connection per host allowed in the pool.
pool_max_idle_per_host = 1024

## The timeout for only the connect phase of a http client.
connect_timeout = "30s"

## The total request timeout, applied from when the request starts connecting until the response body has finished.
## Also considered a total deadline.
timeout = "30s"

## The timeout for idle sockets being kept-alive.
pool_idle_timeout = "90s"

# Custom storage options
# [[storage.providers]]
# name = "S3"

@@ -413,6 +413,23 @@ endpoint = "https://s3.amazonaws.com"
## @toml2docs:none-default
region = "us-west-2"

## The http client options to the storage.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
[storage.http_client]

## The maximum idle connection per host allowed in the pool.
pool_max_idle_per_host = 1024

## The timeout for only the connect phase of a http client.
connect_timeout = "30s"

## The total request timeout, applied from when the request starts connecting until the response body has finished.
## Also considered a total deadline.
timeout = "30s"

## The timeout for idle sockets being kept-alive.
pool_idle_timeout = "90s"

# Custom storage options
# [[storage.providers]]
# name = "S3"
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub use client::{CachedMetaKvBackend, CachedMetaKvBackendBuilder, MetaKvBackend};
pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};

mod client;
mod manager;

@@ -22,6 +22,7 @@ use common_error::ext::BoxedError;
use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::error::Error::CacheNotGet;
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
use common_meta::kv_backend::txn::{Txn, TxnResponse};
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
use common_meta::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
@@ -42,20 +43,20 @@ const DEFAULT_CACHE_MAX_CAPACITY: u64 = 10000;
const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);

pub struct CachedMetaKvBackendBuilder {
pub struct CachedKvBackendBuilder {
cache_max_capacity: Option<u64>,
cache_ttl: Option<Duration>,
cache_tti: Option<Duration>,
meta_client: Arc<MetaClient>,
inner: KvBackendRef,
}

impl CachedMetaKvBackendBuilder {
pub fn new(meta_client: Arc<MetaClient>) -> Self {
impl CachedKvBackendBuilder {
pub fn new(inner: KvBackendRef) -> Self {
Self {
cache_max_capacity: None,
cache_ttl: None,
cache_tti: None,
meta_client,
inner,
}
}

@@ -74,7 +75,7 @@ impl CachedMetaKvBackendBuilder {
self
}

pub fn build(self) -> CachedMetaKvBackend {
pub fn build(self) -> CachedKvBackend {
let cache_max_capacity = self
.cache_max_capacity
.unwrap_or(DEFAULT_CACHE_MAX_CAPACITY);
@@ -85,14 +86,11 @@ impl CachedMetaKvBackendBuilder {
.time_to_live(cache_ttl)
.time_to_idle(cache_tti)
.build();

let kv_backend = Arc::new(MetaKvBackend {
client: self.meta_client,
});
let kv_backend = self.inner;
let name = format!("CachedKvBackend({})", kv_backend.name());
let version = AtomicUsize::new(0);

CachedMetaKvBackend {
CachedKvBackend {
kv_backend,
cache,
name,
@@ -112,19 +110,29 @@ pub type CacheBackend = Cache<Vec<u8>, KeyValue>;
/// Therefore, it is recommended to use CachedMetaKvBackend to only read metadata related
/// information. Note: If you read other information, you may read expired data, which depends on
/// TTL and TTI for cache.
pub struct CachedMetaKvBackend {
pub struct CachedKvBackend {
kv_backend: KvBackendRef,
cache: CacheBackend,
name: String,
version: AtomicUsize,
}

impl TxnService for CachedMetaKvBackend {
#[async_trait::async_trait]
impl TxnService for CachedKvBackend {
type Error = Error;

async fn txn(&self, txn: Txn) -> std::result::Result<TxnResponse, Self::Error> {
// TODO(hl): txn of CachedKvBackend simply pass through to inner backend without invalidating caches.
self.kv_backend.txn(txn).await
}

fn max_txn_ops(&self) -> usize {
self.kv_backend.max_txn_ops()
}
}

#[async_trait::async_trait]
impl KvBackend for CachedMetaKvBackend {
impl KvBackend for CachedKvBackend {
fn name(&self) -> &str {
&self.name
}
@@ -305,7 +313,7 @@ impl KvBackend for CachedMetaKvBackend {
}

#[async_trait::async_trait]
impl KvCacheInvalidator for CachedMetaKvBackend {
impl KvCacheInvalidator for CachedKvBackend {
async fn invalidate_key(&self, key: &[u8]) {
self.create_new_version();
self.cache.invalidate(key).await;
@@ -313,7 +321,7 @@ impl KvCacheInvalidator for CachedMetaKvBackend {
}
}

impl CachedMetaKvBackend {
impl CachedKvBackend {
// only for test
#[cfg(test)]
fn wrap(kv_backend: KvBackendRef) -> Self {
@@ -466,7 +474,7 @@ mod tests {
use common_meta::rpc::KeyValue;
use dashmap::DashMap;

use super::CachedMetaKvBackend;
use super::CachedKvBackend;

#[derive(Default)]
pub struct SimpleKvBackend {
@@ -540,7 +548,7 @@ mod tests {
async fn test_cached_kv_backend() {
let simple_kv = Arc::new(SimpleKvBackend::default());
let get_execute_times = simple_kv.get_execute_times.clone();
let cached_kv = CachedMetaKvBackend::wrap(simple_kv);
let cached_kv = CachedKvBackend::wrap(simple_kv);

add_some_vals(&cached_kv).await;
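The net effect of the hunks above is that the cache layer no longer builds its own `MetaKvBackend` from a `MetaClient`; the renamed `CachedKvBackendBuilder` now wraps any `KvBackendRef`. A minimal sketch of the new construction pattern (the in-memory backend is used only for illustration, mirroring the test above; a frontend would pass its meta-backed `KvBackendRef` instead):

```rust
use std::sync::Arc;

use catalog::kvbackend::{CachedKvBackend, CachedKvBackendBuilder};
use common_meta::kv_backend::memory::MemoryKvBackend;

// Wrap any KvBackendRef in the read-through cache; without explicit settings the
// defaults above apply (10_000 entries, 10 min TTL, 5 min TTI).
fn build_cached_backend() -> CachedKvBackend {
    let inner = Arc::new(MemoryKvBackend::default());
    CachedKvBackendBuilder::new(inner).build()
}
```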
@@ -21,13 +21,14 @@ use cache::{
TABLE_ROUTE_CACHE_NAME,
};
use catalog::kvbackend::{
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
CachedKvBackend, CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_config::Mode;
use common_error::ext::ErrorExt;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::kv_backend::KvBackendRef;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::debug;
@@ -258,8 +259,9 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
.context(StartMetaClientSnafu)?;
let meta_client = Arc::new(meta_client);

let cached_meta_backend =
Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
let cached_meta_backend = Arc::new(
CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone()))).build(),
);
let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
CacheRegistryBuilder::default()
.add_cache(cached_meta_backend.clone())

@@ -15,7 +15,7 @@
use std::sync::Arc;

use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::NodeClients;
use common_base::Plugins;
@@ -246,11 +246,12 @@ impl StartCommand {
let cache_tti = meta_config.metadata_cache_tti;

// TODO(discord9): add helper function to ease the creation of cache registry&such
let cached_meta_backend = CachedMetaKvBackendBuilder::new(meta_client.clone())
.cache_max_capacity(cache_max_capacity)
.cache_ttl(cache_ttl)
.cache_tti(cache_tti)
.build();
let cached_meta_backend =
CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone())))
.cache_max_capacity(cache_max_capacity)
.cache_ttl(cache_ttl)
.cache_tti(cache_tti)
.build();
let cached_meta_backend = Arc::new(cached_meta_backend);

// Builds cache registry

@@ -17,7 +17,7 @@ use std::time::Duration;

use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::NodeClients;
use common_base::Plugins;
@@ -293,11 +293,12 @@ impl StartCommand {
.context(MetaClientInitSnafu)?;

// TODO(discord9): add helper function to ease the creation of cache registry&such
let cached_meta_backend = CachedMetaKvBackendBuilder::new(meta_client.clone())
.cache_max_capacity(cache_max_capacity)
.cache_ttl(cache_ttl)
.cache_tti(cache_tti)
.build();
let cached_meta_backend =
CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone())))
.cache_max_capacity(cache_max_capacity)
.cache_ttl(cache_ttl)
.cache_tti(cache_tti)
.build();
let cached_meta_backend = Arc::new(cached_meta_backend);

// Builds cache registry

@@ -8,11 +8,10 @@ license.workspace = true
workspace = true

[features]
codec = ["dep:serde", "dep:schemars"]
codec = ["dep:serde"]

[dependencies]
const_format = "0.2"
schemars = { workspace = true, optional = true }
serde = { workspace = true, optional = true }
shadow-rs.workspace = true

@@ -49,10 +49,7 @@ impl Display for BuildInfo {
}

#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(
    feature = "codec",
    derive(serde::Serialize, serde::Deserialize, schemars::JsonSchema)
)]
#[cfg_attr(feature = "codec", derive(serde::Serialize, serde::Deserialize))]
pub struct OwnedBuildInfo {
pub branch: String,
pub commit: String,
@@ -14,6 +14,8 @@

//! Datanode configurations

use core::time::Duration;

use common_base::readable_size::ReadableSize;
use common_base::secrets::{ExposeSecret, SecretString};
use common_config::Configurable;
@@ -112,6 +114,38 @@ pub struct ObjectStorageCacheConfig {
pub cache_capacity: Option<ReadableSize>,
}

/// The http client options to the storage.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct HttpClientConfig {
    /// The maximum idle connection per host allowed in the pool.
    pub(crate) pool_max_idle_per_host: u32,

    /// The timeout for only the connect phase of a http client.
    #[serde(with = "humantime_serde")]
    pub(crate) connect_timeout: Duration,

    /// The total request timeout, applied from when the request starts connecting until the response body has finished.
    /// Also considered a total deadline.
    #[serde(with = "humantime_serde")]
    pub(crate) timeout: Duration,

    /// The timeout for idle sockets being kept-alive.
    #[serde(with = "humantime_serde")]
    pub(crate) pool_idle_timeout: Duration,
}

impl Default for HttpClientConfig {
    fn default() -> Self {
        Self {
            pool_max_idle_per_host: 1024,
            connect_timeout: Duration::from_secs(30),
            timeout: Duration::from_secs(30),
            pool_idle_timeout: Duration::from_secs(90),
        }
    }
}
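Because `HttpClientConfig` derives `Deserialize` with `#[serde(default)]` and uses `humantime_serde` on the duration fields, the `[storage.http_client]` block shown in the sample TOML earlier maps directly onto this struct. A rough sketch of that round trip, written as it might look inside this crate (the test itself is illustrative and not part of the diff):

```rust
#[cfg(test)]
mod http_client_config_sketch {
    use std::time::Duration;

    use super::HttpClientConfig;

    #[test]
    fn parse_http_client_section() {
        // Humantime strings such as "10s" deserialize into Duration;
        // fields left out fall back to the Default impl defined above.
        let toml_str = r#"
            pool_max_idle_per_host = 512
            connect_timeout = "10s"
        "#;
        let config: HttpClientConfig = toml::from_str(toml_str).unwrap();
        assert_eq!(config.pool_max_idle_per_host, 512);
        assert_eq!(config.connect_timeout, Duration::from_secs(10));
        assert_eq!(config.timeout, Duration::from_secs(30));
        assert_eq!(config.pool_idle_timeout, Duration::from_secs(90));
    }
}
```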
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct S3Config {
@@ -126,6 +160,7 @@ pub struct S3Config {
pub region: Option<String>,
#[serde(flatten)]
pub cache: ObjectStorageCacheConfig,
pub http_client: HttpClientConfig,
}

impl PartialEq for S3Config {
@@ -138,6 +173,7 @@ impl PartialEq for S3Config {
&& self.endpoint == other.endpoint
&& self.region == other.region
&& self.cache == other.cache
&& self.http_client == other.http_client
}
}

@@ -154,6 +190,7 @@ pub struct OssConfig {
pub endpoint: String,
#[serde(flatten)]
pub cache: ObjectStorageCacheConfig,
pub http_client: HttpClientConfig,
}

impl PartialEq for OssConfig {
@@ -165,6 +202,7 @@ impl PartialEq for OssConfig {
&& self.access_key_secret.expose_secret() == other.access_key_secret.expose_secret()
&& self.endpoint == other.endpoint
&& self.cache == other.cache
&& self.http_client == other.http_client
}
}

@@ -182,6 +220,7 @@ pub struct AzblobConfig {
pub sas_token: Option<String>,
#[serde(flatten)]
pub cache: ObjectStorageCacheConfig,
pub http_client: HttpClientConfig,
}

impl PartialEq for AzblobConfig {
@@ -194,6 +233,7 @@ impl PartialEq for AzblobConfig {
&& self.endpoint == other.endpoint
&& self.sas_token == other.sas_token
&& self.cache == other.cache
&& self.http_client == other.http_client
}
}

@@ -211,6 +251,7 @@ pub struct GcsConfig {
pub endpoint: String,
#[serde(flatten)]
pub cache: ObjectStorageCacheConfig,
pub http_client: HttpClientConfig,
}

impl PartialEq for GcsConfig {
@@ -223,6 +264,7 @@ impl PartialEq for GcsConfig {
&& self.credential.expose_secret() == other.credential.expose_secret()
&& self.endpoint == other.endpoint
&& self.cache == other.cache
&& self.http_client == other.http_client
}
}

@@ -237,6 +279,7 @@ impl Default for S3Config {
endpoint: Option::default(),
region: Option::default(),
cache: ObjectStorageCacheConfig::default(),
http_client: HttpClientConfig::default(),
}
}
}
@@ -251,6 +294,7 @@ impl Default for OssConfig {
access_key_secret: SecretString::from(String::default()),
endpoint: String::default(),
cache: ObjectStorageCacheConfig::default(),
http_client: HttpClientConfig::default(),
}
}
}
@@ -266,6 +310,7 @@ impl Default for AzblobConfig {
endpoint: String::default(),
sas_token: Option::default(),
cache: ObjectStorageCacheConfig::default(),
http_client: HttpClientConfig::default(),
}
}
}
@@ -281,6 +326,7 @@ impl Default for GcsConfig {
credential: SecretString::from(String::default()),
endpoint: String::default(),
cache: ObjectStorageCacheConfig::default(),
http_client: HttpClientConfig::default(),
}
}
}
@@ -18,6 +18,7 @@ use std::path::Path;
use std::sync::Arc;
use std::time::Duration;

use catalog::kvbackend::CachedKvBackendBuilder;
use catalog::memory::MemoryCatalogManager;
use common_base::Plugins;
use common_error::ext::BoxedError;
@@ -208,7 +209,10 @@ impl DatanodeBuilder {
(Box::new(NoopRegionServerEventListener) as _, None)
};

let schema_metadata_manager = Arc::new(SchemaMetadataManager::new(kv_backend.clone()));
let cached_kv_backend = Arc::new(CachedKvBackendBuilder::new(kv_backend.clone()).build());

let schema_metadata_manager =
    Arc::new(SchemaMetadataManager::new(cached_kv_backend.clone()));
let region_server = self
.new_region_server(schema_metadata_manager, region_event_listener)
.await?;
@@ -239,7 +243,15 @@ impl DatanodeBuilder {
}

let heartbeat_task = if let Some(meta_client) = meta_client {
Some(HeartbeatTask::try_new(&self.opts, region_server.clone(), meta_client).await?)
Some(
    HeartbeatTask::try_new(
        &self.opts,
        region_server.clone(),
        meta_client,
        cached_kv_backend,
    )
    .await?,
)
} else {
None
};

@@ -18,6 +18,7 @@ use std::sync::Arc;
use std::time::Duration;

use api::v1::meta::{HeartbeatRequest, NodeInfo, Peer, RegionRole, RegionStat};
use catalog::kvbackend::CachedKvBackend;
use common_meta::datanode::REGION_STATISTIC_KEY;
use common_meta::distributed_time_constants::META_KEEP_ALIVE_INTERVAL_SECS;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
@@ -39,6 +40,7 @@ use crate::alive_keeper::RegionAliveKeeper;
use crate::config::DatanodeOptions;
use crate::error::{self, MetaClientInitSnafu, Result};
use crate::event_listener::RegionServerEventReceiver;
use crate::heartbeat::handler::cache_invalidator::InvalidateSchemaCacheHandler;
use crate::metrics::{self, HEARTBEAT_RECV_COUNT, HEARTBEAT_SENT_COUNT};
use crate::region_server::RegionServer;

@@ -70,6 +72,7 @@ impl HeartbeatTask {
opts: &DatanodeOptions,
region_server: RegionServer,
meta_client: MetaClientRef,
cache_kv_backend: Arc<CachedKvBackend>,
) -> Result<Self> {
let region_alive_keeper = Arc::new(RegionAliveKeeper::new(
region_server.clone(),
@@ -79,6 +82,7 @@ impl HeartbeatTask {
region_alive_keeper.clone(),
Arc::new(ParseMailboxMessageHandler),
Arc::new(RegionHeartbeatResponseHandler::new(region_server.clone())),
Arc::new(InvalidateSchemaCacheHandler::new(cache_kv_backend)),
]));

Ok(Self {
@@ -24,6 +24,7 @@ use futures::future::BoxFuture;
use snafu::OptionExt;
use store_api::storage::RegionId;

pub(crate) mod cache_invalidator;
mod close_region;
mod downgrade_region;
mod open_region;
@@ -134,7 +135,7 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
}
});

Ok(HandleControl::Done)
Ok(HandleControl::Continue)
}
}
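The switch from `HandleControl::Done` to `HandleControl::Continue` matters because the datanode now registers more than one heartbeat handler (see `InvalidateSchemaCacheHandler` below): a handler that returns `Done` stops the executor from offering the message to the handlers behind it. A simplified sketch of that dispatch loop, assuming this is roughly how the handler-group executor in `common_meta` behaves:

```rust
use std::sync::Arc;

use common_meta::heartbeat::handler::{
    HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};

// Illustrative only; the real executor lives in common_meta::heartbeat::handler.
async fn dispatch(
    handlers: &[Arc<dyn HeartbeatResponseHandler>],
    ctx: &mut HeartbeatResponseHandlerContext,
) -> common_meta::error::Result<()> {
    for handler in handlers {
        if !handler.is_acceptable(ctx) {
            continue;
        }
        match handler.handle(ctx).await? {
            // Stop here; later handlers never see this message.
            HandleControl::Done => break,
            // Let the remaining handlers (e.g. the schema cache invalidator) run too.
            HandleControl::Continue => {}
        }
    }
    Ok(())
}
```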
@@ -285,7 +286,7 @@ mod tests {

let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Done);
assert_matches!(control, HandleControl::Continue);

let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();

@@ -340,7 +341,7 @@ mod tests {

let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Done);
assert_matches!(control, HandleControl::Continue);

let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();

@@ -373,7 +374,7 @@ mod tests {

let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Done);
assert_matches!(control, HandleControl::Continue);

let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();

@@ -420,7 +421,7 @@ mod tests {

let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Done);
assert_matches!(control, HandleControl::Continue);

let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();

@@ -442,7 +443,7 @@ mod tests {
});
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Done);
assert_matches!(control, HandleControl::Continue);

let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
src/datanode/src/heartbeat/handler/cache_invalidator.rs (new file, 167 lines)

@@ -0,0 +1,167 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Schema cache invalidator handler

use std::sync::Arc;

use async_trait::async_trait;
use catalog::kvbackend::CachedKvBackend;
use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::heartbeat::handler::{
    HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
use common_meta::instruction::{CacheIdent, Instruction};
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::MetadataKey;
use common_telemetry::debug;

#[derive(Clone)]
pub(crate) struct InvalidateSchemaCacheHandler {
    cached_kv_backend: Arc<CachedKvBackend>,
}

#[async_trait]
impl HeartbeatResponseHandler for InvalidateSchemaCacheHandler {
    fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
        matches!(
            ctx.incoming_message.as_ref(),
            Some((_, Instruction::InvalidateCaches(_)))
        )
    }

    async fn handle(
        &self,
        ctx: &mut HeartbeatResponseHandlerContext,
    ) -> common_meta::error::Result<HandleControl> {
        let Some((_, Instruction::InvalidateCaches(caches))) = ctx.incoming_message.take() else {
            unreachable!("InvalidateSchemaCacheHandler: should be guarded by 'is_acceptable'")
        };

        debug!(
            "InvalidateSchemaCacheHandler: invalidating caches: {:?}",
            caches
        );

        for cache in caches {
            let CacheIdent::SchemaName(schema_name) = cache else {
                continue;
            };
            let key: SchemaNameKey = (&schema_name).into();
            let key_bytes = key.to_bytes();
            // invalidate cache
            self.cached_kv_backend.invalidate_key(&key_bytes).await;
        }

        Ok(HandleControl::Done)
    }
}

impl InvalidateSchemaCacheHandler {
    pub fn new(cached_kv_backend: Arc<CachedKvBackend>) -> Self {
        Self { cached_kv_backend }
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use std::time::Duration;

    use api::v1::meta::HeartbeatResponse;
    use catalog::kvbackend::CachedKvBackendBuilder;
    use common_meta::heartbeat::handler::{
        HandlerGroupExecutor, HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutor,
    };
    use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MessageMeta};
    use common_meta::instruction::{CacheIdent, Instruction};
    use common_meta::key::schema_name::{SchemaName, SchemaNameKey, SchemaNameValue};
    use common_meta::key::{MetadataKey, SchemaMetadataManager};
    use common_meta::kv_backend::memory::MemoryKvBackend;
    use common_meta::kv_backend::KvBackend;
    use common_meta::rpc::store::PutRequest;

    use crate::heartbeat::handler::cache_invalidator::InvalidateSchemaCacheHandler;

    #[tokio::test]
    async fn test_invalidate_schema_cache_handler() {
        let inner_kv = Arc::new(MemoryKvBackend::default());
        let cached_kv = Arc::new(CachedKvBackendBuilder::new(inner_kv.clone()).build());
        let schema_metadata_manager = SchemaMetadataManager::new(cached_kv.clone());

        let schema_name = "test_schema";
        let catalog_name = "test_catalog";
        schema_metadata_manager
            .register_region_table_info(
                1,
                "test_table",
                schema_name,
                catalog_name,
                Some(SchemaNameValue {
                    ttl: Some(Duration::from_secs(1)),
                }),
            )
            .await;

        schema_metadata_manager
            .get_schema_options_by_table_id(1)
            .await
            .unwrap();

        let schema_key = SchemaNameKey::new(catalog_name, schema_name).to_bytes();
        let new_schema_value = SchemaNameValue {
            ttl: Some(Duration::from_secs(3)),
        }
        .try_as_raw_value()
        .unwrap();
        inner_kv
            .put(PutRequest {
                key: schema_key.clone(),
                value: new_schema_value,
                prev_kv: false,
            })
            .await
            .unwrap();

        let executor = Arc::new(HandlerGroupExecutor::new(vec![Arc::new(
            InvalidateSchemaCacheHandler::new(cached_kv),
        )]));

        let (tx, _) = tokio::sync::mpsc::channel(8);
        let mailbox = Arc::new(HeartbeatMailbox::new(tx));

        // removes a valid key
        let response = HeartbeatResponse::default();
        let mut ctx: HeartbeatResponseHandlerContext =
            HeartbeatResponseHandlerContext::new(mailbox, response);
        ctx.incoming_message = Some((
            MessageMeta::new_test(1, "hi", "foo", "bar"),
            Instruction::InvalidateCaches(vec![CacheIdent::SchemaName(SchemaName {
                catalog_name: catalog_name.to_string(),
                schema_name: schema_name.to_string(),
            })]),
        ));
        executor.handle(ctx).await.unwrap();

        assert_eq!(
            Some(Duration::from_secs(3)),
            SchemaNameValue::try_from_raw_value(
                &inner_kv.get(&schema_key).await.unwrap().unwrap().value
            )
            .unwrap()
            .unwrap()
            .ttl
        );
    }
}
@@ -32,7 +32,7 @@ use object_store::util::{join_dir, normalize_dir, with_instrument_layers};
use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder};
use snafu::prelude::*;

use crate::config::{ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
use crate::config::{HttpClientConfig, ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
use crate::error::{self, Result};

pub(crate) async fn new_raw_object_store(
@@ -177,7 +177,7 @@ pub(crate) fn clean_temp_dir(dir: &str) -> Result<()> {
Ok(())
}

pub(crate) fn build_http_client() -> Result<HttpClient> {
pub(crate) fn build_http_client(config: &HttpClientConfig) -> Result<HttpClient> {
let http_builder = {
let mut builder = reqwest::ClientBuilder::new();

@@ -186,25 +186,28 @@ pub(crate) fn build_http_client() -> Result<HttpClient> {
let pool_max_idle_per_host = env::var("_GREPTIMEDB_HTTP_POOL_MAX_IDLE_PER_HOST")
.ok()
.and_then(|v| v.parse::<usize>().ok())
.unwrap_or(usize::MAX);
.inspect(|_| warn!("'_GREPTIMEDB_HTTP_POOL_MAX_IDLE_PER_HOST' might be deprecated in the future. Please set it in the config file instead."))
.unwrap_or(config.pool_max_idle_per_host as usize);
builder = builder.pool_max_idle_per_host(pool_max_idle_per_host);

// Connect timeout default to 30s.
let connect_timeout = env::var("_GREPTIMEDB_HTTP_CONNECT_TIMEOUT")
.ok()
.and_then(|v| v.parse::<u64>().ok())
.unwrap_or(30);
builder = builder.connect_timeout(Duration::from_secs(connect_timeout));
.and_then(|v| v.parse::<u64>().ok().map(Duration::from_secs))
.inspect(|_| warn!("'_GREPTIMEDB_HTTP_CONNECT_TIMEOUT' might be deprecated in the future. Please set it in the config file instead."))
.unwrap_or(config.connect_timeout);
builder = builder.connect_timeout(connect_timeout);

// Pool connection idle timeout default to 90s.
let idle_timeout = env::var("_GREPTIMEDB_HTTP_POOL_IDLE_TIMEOUT")
.ok()
.and_then(|v| v.parse::<u64>().ok())
.unwrap_or(90);
.and_then(|v| v.parse::<u64>().ok().map(Duration::from_secs))
.inspect(|_| warn!("'_GREPTIMEDB_HTTP_POOL_IDLE_TIMEOUT' might be deprecated in the future. Please set it in the config file instead."))
.unwrap_or(config.pool_idle_timeout);

builder = builder.pool_idle_timeout(Duration::from_secs(idle_timeout));
builder = builder.pool_idle_timeout(idle_timeout);

builder
builder.timeout(config.timeout)
};

HttpClient::build(http_builder).context(error::InitBackendSnafu)
@@ -30,13 +30,15 @@ pub(crate) async fn new_azblob_object_store(azblob_config: &AzblobConfig) -> Res
azblob_config.container, &root
);

let client = build_http_client(&azblob_config.http_client)?;

let mut builder = Azblob::default()
.root(&root)
.container(&azblob_config.container)
.endpoint(&azblob_config.endpoint)
.account_name(azblob_config.account_name.expose_secret())
.account_key(azblob_config.account_key.expose_secret())
.http_client(build_http_client()?);
.http_client(client);

if let Some(token) = &azblob_config.sas_token {
builder = builder.sas_token(token);

@@ -29,6 +29,8 @@ pub(crate) async fn new_gcs_object_store(gcs_config: &GcsConfig) -> Result<Objec
gcs_config.bucket, &root
);

let client = build_http_client(&gcs_config.http_client);

let builder = Gcs::default()
.root(&root)
.bucket(&gcs_config.bucket)
@@ -36,7 +38,7 @@ pub(crate) async fn new_gcs_object_store(gcs_config: &GcsConfig) -> Result<Objec
.credential_path(gcs_config.credential_path.expose_secret())
.credential(gcs_config.credential.expose_secret())
.endpoint(&gcs_config.endpoint)
.http_client(build_http_client()?);
.http_client(client?);

Ok(ObjectStore::new(builder)
.context(error::InitBackendSnafu)?

@@ -29,13 +29,15 @@ pub(crate) async fn new_oss_object_store(oss_config: &OssConfig) -> Result<Objec
oss_config.bucket, &root
);

let client = build_http_client(&oss_config.http_client)?;

let builder = Oss::default()
.root(&root)
.bucket(&oss_config.bucket)
.endpoint(&oss_config.endpoint)
.access_key_id(oss_config.access_key_id.expose_secret())
.access_key_secret(oss_config.access_key_secret.expose_secret())
.http_client(build_http_client()?);
.http_client(client);

Ok(ObjectStore::new(builder)
.context(error::InitBackendSnafu)?

@@ -30,12 +30,14 @@ pub(crate) async fn new_s3_object_store(s3_config: &S3Config) -> Result<ObjectSt
s3_config.bucket, &root
);

let client = build_http_client(&s3_config.http_client)?;

let mut builder = S3::default()
.root(&root)
.bucket(&s3_config.bucket)
.access_key_id(s3_config.access_key_id.expose_secret())
.secret_access_key(s3_config.secret_access_key.expose_secret())
.http_client(build_http_client()?);
.http_client(client);

if s3_config.endpoint.is_some() {
builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
@@ -23,6 +23,7 @@ use common_error::ext::BoxedError;
use common_telemetry::debug;
use datafusion::config::ConfigOptions;
use datafusion::error::DataFusionError;
use datafusion::optimizer::analyzer::count_wildcard_rule::CountWildcardRule;
use datafusion::optimizer::analyzer::type_coercion::TypeCoercion;
use datafusion::optimizer::common_subexpr_eliminate::CommonSubexprEliminate;
use datafusion::optimizer::optimize_projections::OptimizeProjections;
@@ -59,6 +60,7 @@ pub async fn apply_df_optimizer(
) -> Result<datafusion_expr::LogicalPlan, Error> {
let cfg = ConfigOptions::new();
let analyzer = Analyzer::with_rules(vec![
Arc::new(CountWildcardRule::new()),
Arc::new(AvgExpandRule::new()),
Arc::new(TumbleExpandRule::new()),
Arc::new(CheckGroupByRule::new()),
@@ -44,7 +44,7 @@ impl MetasrvCacheInvalidator {
.clone()
.unwrap_or_else(|| DEFAULT_SUBJECT.to_string());

let msg = &MailboxMessage::json_message(
let mut msg = MailboxMessage::json_message(
subject,
&format!("Metasrv@{}", self.info.server_addr),
"Frontend broadcast",
@@ -54,22 +54,21 @@ impl MetasrvCacheInvalidator {
.with_context(|_| meta_error::SerdeJsonSnafu)?;

self.mailbox
.broadcast(&BroadcastChannel::Frontend, msg)
.broadcast(&BroadcastChannel::Frontend, &msg)
.await
.map_err(BoxedError::new)
.context(meta_error::ExternalSnafu)?;

let msg = &MailboxMessage::json_message(
subject,
&format!("Metasrv@{}", self.info.server_addr),
"Flownode broadcast",
common_time::util::current_time_millis(),
&instruction,
)
.with_context(|_| meta_error::SerdeJsonSnafu)?;

msg.to = "Datanode broadcast".to_string();
self.mailbox
.broadcast(&BroadcastChannel::Flownode, msg)
.broadcast(&BroadcastChannel::Datanode, &msg)
.await
.map_err(BoxedError::new)
.context(meta_error::ExternalSnafu)?;

msg.to = "Flownode broadcast".to_string();
self.mailbox
.broadcast(&BroadcastChannel::Flownode, &msg)
.await
.map_err(BoxedError::new)
.context(meta_error::ExternalSnafu)
@@ -27,9 +27,10 @@ use store_api::storage::consts::ReservedColumnId;
use store_api::storage::{ConcreteDataType, RegionId};

use crate::error::{
ColumnTypeMismatchSnafu, MitoReadOperationSnafu, MitoWriteOperationSnafu, Result,
ColumnTypeMismatchSnafu, ForbiddenPhysicalAlterSnafu, MitoReadOperationSnafu,
MitoWriteOperationSnafu, Result,
};
use crate::metrics::MITO_DDL_DURATION;
use crate::metrics::{FORBIDDEN_OPERATION_COUNT, MITO_DDL_DURATION};
use crate::utils;

const MAX_RETRIES: usize = 5;
@@ -186,6 +187,30 @@ impl DataRegion {
.context(MitoReadOperationSnafu)?;
Ok(metadata.column_metadatas.clone())
}

pub async fn alter_region_options(
&self,
region_id: RegionId,
request: RegionAlterRequest,
) -> Result<AffectedRows> {
match request.kind {
AlterKind::SetRegionOptions { options: _ }
| AlterKind::UnsetRegionOptions { keys: _ } => {
let region_id = utils::to_data_region_id(region_id);
self.mito
.handle_request(region_id, RegionRequest::Alter(request))
.await
.context(MitoWriteOperationSnafu)
.map(|result| result.affected_rows)
}
_ => {
info!("Metric region received alter request {request:?} on physical region {region_id:?}");
FORBIDDEN_OPERATION_COUNT.inc();

ForbiddenPhysicalAlterSnafu.fail()
}
}
}
}

#[cfg(test)]

@@ -96,9 +96,10 @@ use crate::utils;
/// | Read | ✅ | ✅ |
/// | Close | ✅ | ✅ |
/// | Open | ✅ | ✅ |
/// | Alter | ✅ | ❌ |
/// | Alter | ✅ | ❓* |
///
/// *: Physical region can be dropped only when all related logical regions are dropped.
/// *: Alter: Physical regions only support altering region options.
///
/// ## Internal Columns
///
@@ -14,7 +14,7 @@

use std::collections::HashMap;

use common_telemetry::{error, info};
use common_telemetry::error;
use snafu::{OptionExt, ResultExt};
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::ALTER_PHYSICAL_EXTENSION_KEY;
@@ -22,10 +22,7 @@ use store_api::region_request::{AffectedRows, AlterKind, RegionAlterRequest};
use store_api::storage::RegionId;

use crate::engine::MetricEngineInner;
use crate::error::{
ForbiddenPhysicalAlterSnafu, LogicalRegionNotFoundSnafu, Result, SerializeColumnMetadataSnafu,
};
use crate::metrics::FORBIDDEN_OPERATION_COUNT;
use crate::error::{LogicalRegionNotFoundSnafu, Result, SerializeColumnMetadataSnafu};
use crate::utils::{to_data_region_id, to_metadata_region_id};

impl MetricEngineInner {
@@ -150,20 +147,22 @@ impl MetricEngineInner {
region_id: RegionId,
request: RegionAlterRequest,
) -> Result<()> {
info!("Metric region received alter request {request:?} on physical region {region_id:?}");
FORBIDDEN_OPERATION_COUNT.inc();

ForbiddenPhysicalAlterSnafu.fail()
self.data_region
.alter_region_options(region_id, request)
.await?;
Ok(())
}
}

#[cfg(test)]
mod test {
use std::time::Duration;

use api::v1::SemanticType;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use store_api::metadata::ColumnMetadata;
use store_api::region_request::AddColumn;
use store_api::region_request::{AddColumn, SetRegionOption};

use super::*;
use crate::test_util::TestEnv;
@@ -204,6 +203,18 @@ mod test {
"Alter request to physical region is forbidden".to_string()
);

// alter physical region's option should work
let alter_region_option_request = RegionAlterRequest {
schema_version: 0,
kind: AlterKind::SetRegionOptions {
options: vec![SetRegionOption::TTL(Duration::from_secs(500))],
},
};
let result = engine_inner
.alter_physical_region(physical_region_id, alter_region_option_request.clone())
.await;
assert!(result.is_ok());

// alter logical region
let metadata_region = env.metadata_region();
let logical_region_id = env.default_logical_region_id();
@@ -456,11 +456,7 @@ impl MetricEngineInner {
// concat region dir
let metadata_region_dir = join_dir(&request.region_dir, METADATA_REGION_SUBDIR);

// remove TTL and APPEND_MODE option
let mut options = request.options.clone();
options.insert(TTL_KEY.to_string(), "10000 years".to_string());
options.remove(APPEND_MODE_KEY);

let options = region_options_for_metadata_region(request.options.clone());
RegionCreateRequest {
engine: MITO_ENGINE_NAME.to_string(),
column_metadatas: vec![
@@ -539,6 +535,15 @@ impl MetricEngineInner {
}
}

/// Creates the region options for metadata region in metric engine.
pub(crate) fn region_options_for_metadata_region(
mut original: HashMap<String, String>,
) -> HashMap<String, String> {
original.remove(APPEND_MODE_KEY);
original.insert(TTL_KEY.to_string(), "10000 years".to_string());
original
}

#[cfg(test)]
mod test {
use store_api::metric_engine_consts::{METRIC_ENGINE_NAME, PHYSICAL_TABLE_METADATA_KEY};

@@ -24,6 +24,7 @@ use store_api::region_request::{AffectedRows, RegionOpenRequest, RegionRequest};
use store_api::storage::RegionId;

use super::MetricEngineInner;
use crate::engine::create::region_options_for_metadata_region;
use crate::engine::options::set_data_region_options;
use crate::error::{OpenMitoRegionSnafu, Result};
use crate::metrics::{LOGICAL_REGION_COUNT, PHYSICAL_REGION_COUNT};
@@ -68,9 +69,10 @@ impl MetricEngineInner {
let metadata_region_dir = join_dir(&request.region_dir, METADATA_REGION_SUBDIR);
let data_region_dir = join_dir(&request.region_dir, DATA_REGION_SUBDIR);

let metadata_region_options = region_options_for_metadata_region(request.options.clone());
let open_metadata_region_request = RegionOpenRequest {
region_dir: metadata_region_dir,
options: request.options.clone(),
options: metadata_region_options,
engine: MITO_ENGINE_NAME.to_string(),
skip_wal_replay: request.skip_wal_replay,
};
@@ -27,6 +27,7 @@ use store_api::metadata::ColumnMetadata;
use store_api::region_engine::{RegionEngine, RegionRole};
use store_api::region_request::{
AddColumn, AddColumnLocation, AlterKind, RegionAlterRequest, RegionOpenRequest, RegionRequest,
SetRegionOption,
};
use store_api::storage::{RegionId, ScanRequest};

@@ -573,6 +574,62 @@ async fn test_alter_column_fulltext_options() {
check_region_version(&engine, region_id, 1, 3, 1, 3);
}

#[tokio::test]
async fn test_alter_region_ttl_options() {
common_telemetry::init_default_ut_logging();

let mut env = TestEnv::new();
let listener = Arc::new(AlterFlushListener::default());
let engine = env
.create_engine_with(MitoConfig::default(), None, Some(listener.clone()))
.await;

let region_id = RegionId::new(1, 1);
let request = CreateRequestBuilder::new().build();

env.get_schema_metadata_manager()
.register_region_table_info(
region_id.table_id(),
"test_table",
"test_catalog",
"test_schema",
None,
)
.await;
engine
.handle_request(region_id, RegionRequest::Create(request))
.await
.unwrap();
let engine_cloned = engine.clone();
let alter_ttl_request = RegionAlterRequest {
schema_version: 0,
kind: AlterKind::SetRegionOptions {
options: vec![SetRegionOption::TTL(Duration::from_secs(500))],
},
};
let alter_job = tokio::spawn(async move {
engine_cloned
.handle_request(region_id, RegionRequest::Alter(alter_ttl_request))
.await
.unwrap();
});

alter_job.await.unwrap();

let check_ttl = |engine: &MitoEngine, expected: &Duration| {
let current_ttl = engine
.get_region(region_id)
.unwrap()
.version()
.options
.ttl
.unwrap();
assert_eq!(*expected, current_ttl);
};
// Verify the ttl.
check_ttl(&engine, &Duration::from_secs(500));
}

#[tokio::test]
async fn test_write_stall_on_altering() {
common_telemetry::init_default_ut_logging();
@@ -170,7 +170,7 @@ pub(crate) fn scan_file_ranges(

for range in ranges {
let build_reader_start = Instant::now();
let reader = range.reader(None).await?;
let reader = range.reader(stream_ctx.input.series_row_selector).await?;
let build_cost = build_reader_start.elapsed();
part_metrics.inc_build_reader_cost(build_cost);
let compat_batch = range.compat_batch();
@@ -15,6 +15,7 @@
|
||||
pub mod cmcd;
|
||||
pub mod csv;
|
||||
pub mod date;
|
||||
pub mod decolorize;
|
||||
pub mod dissect;
|
||||
pub mod epoch;
|
||||
pub mod gsub;
|
||||
@@ -29,6 +30,7 @@ use ahash::{HashSet, HashSetExt};
|
||||
use cmcd::{CmcdProcessor, CmcdProcessorBuilder};
|
||||
use csv::{CsvProcessor, CsvProcessorBuilder};
|
||||
use date::{DateProcessor, DateProcessorBuilder};
|
||||
use decolorize::{DecolorizeProcessor, DecolorizeProcessorBuilder};
|
||||
use dissect::{DissectProcessor, DissectProcessorBuilder};
|
||||
use enum_dispatch::enum_dispatch;
|
||||
use epoch::{EpochProcessor, EpochProcessorBuilder};
|
||||
@@ -61,11 +63,6 @@ const TARGET_FIELDS_NAME: &str = "target_fields";
|
||||
const JSON_PATH_NAME: &str = "json_path";
|
||||
const JSON_PATH_RESULT_INDEX_NAME: &str = "result_index";
|
||||
|
||||
// const IF_NAME: &str = "if";
|
||||
// const IGNORE_FAILURE_NAME: &str = "ignore_failure";
|
||||
// const ON_FAILURE_NAME: &str = "on_failure";
|
||||
// const TAG_NAME: &str = "tag";
|
||||
|
||||
/// Processor trait defines the interface for all processors.
|
||||
///
|
||||
/// A processor is a transformation that can be applied to a field in a document
|
||||
@@ -99,6 +96,7 @@ pub enum ProcessorKind {
|
||||
Epoch(EpochProcessor),
|
||||
Date(DateProcessor),
|
||||
JsonPath(JsonPathProcessor),
|
||||
Decolorize(DecolorizeProcessor),
|
||||
}
|
||||
|
||||
/// ProcessorBuilder trait defines the interface for all processor builders
|
||||
@@ -128,6 +126,7 @@ pub enum ProcessorBuilders {
|
||||
Epoch(EpochProcessorBuilder),
|
||||
Date(DateProcessorBuilder),
|
||||
JsonPath(JsonPathProcessorBuilder),
|
||||
Decolorize(DecolorizeProcessorBuilder),
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
@@ -275,6 +274,9 @@ fn parse_processor(doc: &yaml_rust::Yaml) -> Result<ProcessorBuilders> {
|
||||
json_path::PROCESSOR_JSON_PATH => {
|
||||
ProcessorBuilders::JsonPath(json_path::JsonPathProcessorBuilder::try_from(value)?)
|
||||
}
|
||||
decolorize::PROCESSOR_DECOLORIZE => {
|
||||
ProcessorBuilders::Decolorize(DecolorizeProcessorBuilder::try_from(value)?)
|
||||
}
|
||||
_ => return UnsupportedProcessorSnafu { processor: str_key }.fail(),
|
||||
};
|
||||
|
||||
|
||||
@@ -12,6 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Pipeline Processor for CMCD (Common Media Client Data) data.
|
||||
//!
|
||||
//! Refer to [`CmcdProcessor`] for more information.
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use ahash::HashSet;
|
||||
|
||||
src/pipeline/src/etl/processor/decolorize.rs (new file, 195 additions)
@@ -0,0 +1,195 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Removes ANSI color control codes from the input text.
|
||||
//!
|
||||
//! Similar to [`decolorize`](https://grafana.com/docs/loki/latest/query/log_queries/#removing-color-codes)
|
||||
//! from Grafana Loki and [`strip_ansi_escape_codes`](https://vector.dev/docs/reference/vrl/functions/#strip_ansi_escape_codes)
|
||||
//! from Vector VRL.
|
||||
|
||||
use ahash::HashSet;
|
||||
use once_cell::sync::Lazy;
|
||||
use regex::Regex;
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::etl::error::{
|
||||
Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result,
|
||||
};
|
||||
use crate::etl::field::{Fields, OneInputOneOutputField};
|
||||
use crate::etl::processor::{
|
||||
yaml_bool, yaml_new_field, yaml_new_fields, ProcessorBuilder, ProcessorKind, FIELDS_NAME,
|
||||
FIELD_NAME, IGNORE_MISSING_NAME,
|
||||
};
|
||||
use crate::etl::value::Value;
|
||||
|
||||
pub(crate) const PROCESSOR_DECOLORIZE: &str = "decolorize";
|
||||
|
||||
static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"\x1b\[[0-9;]*m").unwrap());
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct DecolorizeProcessorBuilder {
|
||||
fields: Fields,
|
||||
ignore_missing: bool,
|
||||
}
|
||||
|
||||
impl ProcessorBuilder for DecolorizeProcessorBuilder {
|
||||
fn output_keys(&self) -> HashSet<&str> {
|
||||
self.fields
|
||||
.iter()
|
||||
.map(|f| f.target_or_input_field())
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn input_keys(&self) -> HashSet<&str> {
|
||||
self.fields.iter().map(|f| f.input_field()).collect()
|
||||
}
|
||||
|
||||
fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
|
||||
self.build(intermediate_keys).map(ProcessorKind::Decolorize)
|
||||
}
|
||||
}
|
||||
|
||||
impl DecolorizeProcessorBuilder {
|
||||
fn build(self, intermediate_keys: &[String]) -> Result<DecolorizeProcessor> {
|
||||
let mut real_fields = vec![];
|
||||
for field in self.fields.into_iter() {
|
||||
let input = OneInputOneOutputField::build(
|
||||
"decolorize",
|
||||
intermediate_keys,
|
||||
field.input_field(),
|
||||
field.target_or_input_field(),
|
||||
)?;
|
||||
real_fields.push(input);
|
||||
}
|
||||
Ok(DecolorizeProcessor {
|
||||
fields: real_fields,
|
||||
ignore_missing: self.ignore_missing,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove ANSI color control codes from the input text.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct DecolorizeProcessor {
|
||||
fields: Vec<OneInputOneOutputField>,
|
||||
ignore_missing: bool,
|
||||
}
|
||||
|
||||
impl DecolorizeProcessor {
|
||||
fn process_string(&self, val: &str) -> Result<Value> {
|
||||
Ok(Value::String(RE.replace_all(val, "").into_owned()))
|
||||
}
|
||||
|
||||
fn process(&self, val: &Value) -> Result<Value> {
|
||||
match val {
|
||||
Value::String(val) => self.process_string(val),
|
||||
_ => ProcessorExpectStringSnafu {
|
||||
processor: PROCESSOR_DECOLORIZE,
|
||||
v: val.clone(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&yaml_rust::yaml::Hash> for DecolorizeProcessorBuilder {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(value: &yaml_rust::yaml::Hash) -> Result<Self> {
|
||||
let mut fields = Fields::default();
|
||||
let mut ignore_missing = false;
|
||||
|
||||
for (k, v) in value.iter() {
|
||||
let key = k
|
||||
.as_str()
|
||||
.with_context(|| KeyMustBeStringSnafu { k: k.clone() })?;
|
||||
|
||||
match key {
|
||||
FIELD_NAME => {
|
||||
fields = Fields::one(yaml_new_field(v, FIELD_NAME)?);
|
||||
}
|
||||
FIELDS_NAME => {
|
||||
fields = yaml_new_fields(v, FIELDS_NAME)?;
|
||||
}
|
||||
IGNORE_MISSING_NAME => {
|
||||
ignore_missing = yaml_bool(v, IGNORE_MISSING_NAME)?;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(DecolorizeProcessorBuilder {
|
||||
fields,
|
||||
ignore_missing,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl crate::etl::processor::Processor for DecolorizeProcessor {
|
||||
fn kind(&self) -> &str {
|
||||
PROCESSOR_DECOLORIZE
|
||||
}
|
||||
|
||||
fn ignore_missing(&self) -> bool {
|
||||
self.ignore_missing
|
||||
}
|
||||
|
||||
fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
|
||||
for field in self.fields.iter() {
|
||||
let index = field.input_index();
|
||||
match val.get(index) {
|
||||
Some(Value::Null) | None => {
|
||||
if !self.ignore_missing {
|
||||
return ProcessorMissingFieldSnafu {
|
||||
processor: self.kind(),
|
||||
field: field.input_name(),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
}
|
||||
Some(v) => {
|
||||
let result = self.process(v)?;
|
||||
let output_index = field.output_index();
|
||||
val[output_index] = result;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_decolorize_processor() {
|
||||
let processor = DecolorizeProcessor {
|
||||
fields: vec![],
|
||||
ignore_missing: false,
|
||||
};
|
||||
|
||||
let val = Value::String("\x1b[32mGreen\x1b[0m".to_string());
|
||||
let result = processor.process(&val).unwrap();
|
||||
assert_eq!(result, Value::String("Green".to_string()));
|
||||
|
||||
let val = Value::String("Plain text".to_string());
|
||||
let result = processor.process(&val).unwrap();
|
||||
assert_eq!(result, Value::String("Plain text".to_string()));
|
||||
|
||||
let val = Value::String("\x1b[46mfoo\x1b[0m bar".to_string());
|
||||
let result = processor.process(&val).unwrap();
|
||||
assert_eq!(result, Value::String("foo bar".to_string()));
|
||||
}
|
||||
}
|
||||
@@ -644,7 +644,6 @@ impl DissectProcessor {
|
||||
let mut pos = 0;
|
||||
|
||||
let mut appends: HashMap<usize, Vec<(String, u32)>> = HashMap::new();
|
||||
// let mut maps: HashMap<usize, (String,String)> = HashMap::new();
|
||||
|
||||
let mut process_name_value = |name: &Name, value: String| {
|
||||
let name_index = name.index;
|
||||
@@ -658,22 +657,6 @@ impl DissectProcessor {
|
||||
.or_default()
|
||||
.push((value, order.unwrap_or_default()));
|
||||
}
|
||||
// Some(StartModifier::MapKey) => match maps.get(&name_index) {
|
||||
// Some(map_val) => {
|
||||
// map.insert(value, Value::String(map_val.to_string()));
|
||||
// }
|
||||
// None => {
|
||||
// maps.insert(name_index, value);
|
||||
// }
|
||||
// },
|
||||
// Some(StartModifier::MapVal) => match maps.get(&name_index) {
|
||||
// Some(map_key) => {
|
||||
// map.insert(map_key, Value::String(value));
|
||||
// }
|
||||
// None => {
|
||||
// maps.insert(name_index, value);
|
||||
// }
|
||||
// },
|
||||
Some(_) => {
|
||||
// do nothing, ignore MapKey and MapVal
|
||||
// because transform can know the key name
|
||||
|
||||
@@ -132,10 +132,6 @@ impl GsubProcessor {
|
||||
v: val.clone(),
|
||||
}
|
||||
.fail(),
|
||||
// Err(format!(
|
||||
// "{} processor: expect string or array string, but got {val:?}",
|
||||
// self.kind()
|
||||
// )),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -674,3 +674,36 @@ transform:
|
||||
|
||||
assert_eq!(expected, r);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_decolorize() {
|
||||
let input_value = serde_json::json!({
|
||||
"message": "\u{001b}[32mSuccess\u{001b}[0m and \u{001b}[31mError\u{001b}[0m"
|
||||
});
|
||||
|
||||
let pipeline_yaml = r#"
|
||||
processors:
|
||||
- decolorize:
|
||||
fields:
|
||||
- message
|
||||
transform:
|
||||
- fields:
|
||||
- message
|
||||
type: string
|
||||
"#;
|
||||
let yaml_content = Content::Yaml(pipeline_yaml.into());
|
||||
let pipeline: Pipeline<GreptimeTransformer> = parse(&yaml_content).unwrap();
|
||||
|
||||
let mut status = pipeline.init_intermediate_state();
|
||||
pipeline.prepare(input_value, &mut status).unwrap();
|
||||
let row = pipeline.exec_mut(&mut status).unwrap();
|
||||
|
||||
let r = row
|
||||
.values
|
||||
.into_iter()
|
||||
.map(|v| v.value_data.unwrap())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let expected = StringValue("Success and Error".into());
|
||||
assert_eq!(expected, r[0]);
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[features]
|
||||
default = []
|
||||
dashboard = []
|
||||
mem-prof = ["dep:common-mem-prof"]
|
||||
pprof = ["dep:common-pprof"]
|
||||
@@ -15,7 +16,6 @@ workspace = true
|
||||
|
||||
[dependencies]
|
||||
ahash = "0.8"
|
||||
aide = { version = "0.9", features = ["axum"] }
|
||||
api.workspace = true
|
||||
arrow.workspace = true
|
||||
arrow-flight.workspace = true
|
||||
@@ -92,7 +92,6 @@ rust-embed = { version = "6.6", features = ["debug-embed"] }
|
||||
rustls = { version = "0.23", default-features = false, features = ["ring", "logging", "std", "tls12"] }
|
||||
rustls-pemfile = "2.0"
|
||||
rustls-pki-types = "1.0"
|
||||
schemars.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
session.workspace = true
|
||||
|
||||
@@ -18,15 +18,12 @@ use std::net::SocketAddr;
|
||||
use std::sync::Mutex as StdMutex;
|
||||
use std::time::Duration;
|
||||
|
||||
use aide::axum::{routing as apirouting, ApiRouter, IntoApiResponse};
|
||||
use aide::openapi::{Info, OpenApi, Server as OpenAPIServer};
|
||||
use aide::OperationOutput;
|
||||
use async_trait::async_trait;
|
||||
use auth::UserProviderRef;
|
||||
use axum::error_handling::HandleErrorLayer;
|
||||
use axum::extract::DefaultBodyLimit;
|
||||
use axum::response::{Html, IntoResponse, Json, Response};
|
||||
use axum::{middleware, routing, BoxError, Extension, Router};
|
||||
use axum::response::{IntoResponse, Json, Response};
|
||||
use axum::{middleware, routing, BoxError, Router};
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_base::Plugins;
|
||||
use common_error::status_code::StatusCode;
|
||||
@@ -39,7 +36,6 @@ use datatypes::schema::SchemaRef;
|
||||
use datatypes::value::transform_value_ref_to_json_value;
|
||||
use event::{LogState, LogValidatorRef};
|
||||
use futures::FutureExt;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use snafu::{ensure, ResultExt};
|
||||
@@ -148,7 +144,7 @@ impl Default for HttpOptions {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema, Eq, PartialEq)]
|
||||
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
|
||||
pub struct ColumnSchema {
|
||||
name: String,
|
||||
data_type: String,
|
||||
@@ -160,7 +156,7 @@ impl ColumnSchema {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema, Eq, PartialEq)]
|
||||
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
|
||||
pub struct OutputSchema {
|
||||
column_schemas: Vec<ColumnSchema>,
|
||||
}
|
||||
@@ -188,7 +184,7 @@ impl From<SchemaRef> for OutputSchema {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema, Eq, PartialEq)]
|
||||
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
|
||||
pub struct HttpRecordsOutput {
|
||||
schema: OutputSchema,
|
||||
rows: Vec<Vec<Value>>,
|
||||
@@ -264,7 +260,7 @@ impl HttpRecordsOutput {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema, Eq, PartialEq)]
|
||||
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum GreptimeQueryOutput {
|
||||
AffectedRows(usize),
|
||||
@@ -352,7 +348,7 @@ impl Display for Epoch {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub enum HttpResponse {
|
||||
Arrow(ArrowResponse),
|
||||
Csv(CsvResponse),
|
||||
@@ -420,10 +416,6 @@ impl IntoResponse for HttpResponse {
|
||||
}
|
||||
}
|
||||
|
||||
impl OperationOutput for HttpResponse {
|
||||
type Inner = Response;
|
||||
}
|
||||
|
||||
impl From<ArrowResponse> for HttpResponse {
|
||||
fn from(value: ArrowResponse) -> Self {
|
||||
HttpResponse::Arrow(value)
|
||||
@@ -466,14 +458,6 @@ impl From<JsonResponse> for HttpResponse {
|
||||
}
|
||||
}
|
||||
|
||||
async fn serve_api(Extension(api): Extension<OpenApi>) -> impl IntoApiResponse {
|
||||
Json(api)
|
||||
}
|
||||
|
||||
async fn serve_docs() -> Html<String> {
|
||||
Html(include_str!("http/redoc.html").to_owned())
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct ApiState {
|
||||
pub sql_handler: ServerSqlQueryHandlerRef,
|
||||
@@ -490,45 +474,28 @@ pub struct HttpServerBuilder {
|
||||
options: HttpOptions,
|
||||
plugins: Plugins,
|
||||
user_provider: Option<UserProviderRef>,
|
||||
api: OpenApi,
|
||||
router: Router,
|
||||
}
|
||||
|
||||
impl HttpServerBuilder {
|
||||
pub fn new(options: HttpOptions) -> Self {
|
||||
let api = OpenApi {
|
||||
info: Info {
|
||||
title: "GreptimeDB HTTP API".to_string(),
|
||||
description: Some("HTTP APIs to interact with GreptimeDB".to_string()),
|
||||
version: HTTP_API_VERSION.to_string(),
|
||||
..Info::default()
|
||||
},
|
||||
servers: vec![OpenAPIServer {
|
||||
url: format!("/{HTTP_API_VERSION}"),
|
||||
..OpenAPIServer::default()
|
||||
}],
|
||||
..OpenApi::default()
|
||||
};
|
||||
Self {
|
||||
options,
|
||||
plugins: Plugins::default(),
|
||||
user_provider: None,
|
||||
api,
|
||||
router: Router::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_sql_handler(
|
||||
mut self,
|
||||
self,
|
||||
sql_handler: ServerSqlQueryHandlerRef,
|
||||
script_handler: Option<ScriptHandlerRef>,
|
||||
) -> Self {
|
||||
let sql_router = HttpServer::route_sql(ApiState {
|
||||
sql_handler,
|
||||
script_handler,
|
||||
})
|
||||
.finish_api(&mut self.api)
|
||||
.layer(Extension(self.api.clone()));
|
||||
});
|
||||
|
||||
Self {
|
||||
router: self
|
||||
@@ -635,11 +602,10 @@ impl HttpServerBuilder {
|
||||
Self { plugins, ..self }
|
||||
}
|
||||
|
||||
pub fn with_greptime_config_options(mut self, opts: String) -> Self {
|
||||
pub fn with_greptime_config_options(self, opts: String) -> Self {
|
||||
let config_router = HttpServer::route_config(GreptimeOptionsConfigState {
|
||||
greptime_config_options: opts,
|
||||
})
|
||||
.finish_api(&mut self.api);
|
||||
});
|
||||
|
||||
Self {
|
||||
router: self.router.nest("", config_router),
|
||||
@@ -791,22 +757,15 @@ impl HttpServer {
|
||||
.with_state(log_state)
|
||||
}
|
||||
|
||||
fn route_sql<S>(api_state: ApiState) -> ApiRouter<S> {
|
||||
ApiRouter::new()
|
||||
.api_route(
|
||||
"/sql",
|
||||
apirouting::get_with(handler::sql, handler::sql_docs)
|
||||
.post_with(handler::sql, handler::sql_docs),
|
||||
)
|
||||
.api_route(
|
||||
fn route_sql<S>(api_state: ApiState) -> Router<S> {
|
||||
Router::new()
|
||||
.route("/sql", routing::get(handler::sql).post(handler::sql))
|
||||
.route(
|
||||
"/promql",
|
||||
apirouting::get_with(handler::promql, handler::sql_docs)
|
||||
.post_with(handler::promql, handler::sql_docs),
|
||||
routing::get(handler::promql).post(handler::promql),
|
||||
)
|
||||
.api_route("/scripts", apirouting::post(script::scripts))
|
||||
.api_route("/run-script", apirouting::post(script::run_script))
|
||||
.route("/private/api.json", apirouting::get(serve_api))
|
||||
.route("/private/docs", apirouting::get(serve_docs))
|
||||
.route("/scripts", routing::post(script::scripts))
|
||||
.route("/run-script", routing::post(script::run_script))
|
||||
.with_state(api_state)
|
||||
}
|
||||
|
||||
@@ -902,9 +861,9 @@ impl HttpServer {
|
||||
.with_state(otlp_handler)
|
||||
}
|
||||
|
||||
fn route_config<S>(state: GreptimeOptionsConfigState) -> ApiRouter<S> {
|
||||
ApiRouter::new()
|
||||
.route("/config", apirouting::get(handler::config))
|
||||
fn route_config<S>(state: GreptimeOptionsConfigState) -> Router<S> {
|
||||
Router::new()
|
||||
.route("/config", routing::get(handler::config))
|
||||
.with_state(state)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,7 +40,6 @@ use pipeline::error::PipelineTransformSnafu;
|
||||
use pipeline::util::to_pipeline_version;
|
||||
use pipeline::PipelineVersion;
|
||||
use prost::Message;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{Deserializer, Map, Value};
|
||||
use session::context::{Channel, QueryContext, QueryContextRef};
|
||||
@@ -89,7 +88,7 @@ lazy_static! {
|
||||
];
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct LogIngesterQueryParams {
|
||||
pub table: Option<String>,
|
||||
pub db: Option<String>,
|
||||
|
||||
@@ -16,7 +16,6 @@ use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use aide::transform::TransformOperation;
|
||||
use axum::extract::{Json, Query, State};
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use axum::{Extension, Form};
|
||||
@@ -28,7 +27,6 @@ use common_query::{Output, OutputData};
|
||||
use common_recordbatch::util;
|
||||
use common_telemetry::tracing;
|
||||
use query::parser::{PromQuery, DEFAULT_LOOKBACK_STRING};
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use session::context::{Channel, QueryContext, QueryContextRef};
|
||||
@@ -48,7 +46,7 @@ use crate::http::{
|
||||
use crate::metrics_handler::MetricsHandler;
|
||||
use crate::query_handler::sql::ServerSqlQueryHandlerRef;
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct SqlQuery {
|
||||
pub db: Option<String>,
|
||||
pub sql: Option<String>,
|
||||
@@ -219,7 +217,7 @@ pub async fn from_output(
|
||||
Ok((results, merge_map))
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct PromqlQuery {
|
||||
pub query: String,
|
||||
pub start: String,
|
||||
@@ -277,10 +275,6 @@ pub async fn promql(
|
||||
.into_response()
|
||||
}
|
||||
|
||||
pub(crate) fn sql_docs(op: TransformOperation) -> TransformOperation {
|
||||
op.response::<200, Json<HttpResponse>>()
|
||||
}
|
||||
|
||||
/// Handler to export metrics
|
||||
#[axum_macros::debug_handler]
|
||||
pub async fn metrics(
|
||||
@@ -300,10 +294,10 @@ pub async fn metrics(
|
||||
state.render()
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct HealthQuery {}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq)]
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct HealthResponse {}
|
||||
|
||||
/// Handler to export healthy check
|
||||
@@ -314,7 +308,7 @@ pub async fn health(Query(_params): Query<HealthQuery>) -> Json<HealthResponse>
|
||||
Json(HealthResponse {})
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq)]
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct StatusResponse<'a> {
|
||||
pub source_time: &'a str,
|
||||
pub commit: &'a str,
|
||||
|
||||
@@ -22,14 +22,13 @@ pub mod handler {
|
||||
use axum::response::IntoResponse;
|
||||
use common_pprof::Profiling;
|
||||
use common_telemetry::info;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{DumpPprofSnafu, Result};
|
||||
|
||||
/// Output format.
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum Output {
|
||||
/// google’s pprof format report in protobuf.
|
||||
@@ -40,7 +39,7 @@ pub mod handler {
|
||||
Flamegraph,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
#[serde(default)]
|
||||
pub struct PprofQuery {
|
||||
seconds: u64,
|
||||
|
||||
@@ -28,7 +28,6 @@ use hyper::{Body, HeaderMap};
|
||||
use lazy_static::lazy_static;
|
||||
use object_pool::Pool;
|
||||
use prost::Message;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use session::context::{Channel, QueryContext};
|
||||
use snafu::prelude::*;
|
||||
@@ -49,7 +48,7 @@ pub const DEFAULT_ENCODING: &str = "snappy";
|
||||
pub const VM_ENCODING: &str = "zstd";
|
||||
pub const VM_PROTO_VERSION: &str = "1";
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct RemoteWriteQuery {
|
||||
pub db: Option<String>,
|
||||
/// Specify which physical table to use for storing metrics.
|
||||
|
||||
@@ -38,7 +38,6 @@ use promql_parser::parser::{
|
||||
UnaryExpr, VectorSelector,
|
||||
};
|
||||
use query::parser::{PromQuery, DEFAULT_LOOKBACK_STRING};
|
||||
use schemars::JsonSchema;
|
||||
use serde::de::{self, MapAccess, Visitor};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
@@ -55,7 +54,7 @@ use crate::prom_store::{FIELD_NAME_LABEL, METRIC_NAME_LABEL};
|
||||
use crate::prometheus_handler::PrometheusHandlerRef;
|
||||
|
||||
/// For [ValueType::Vector] result type
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, PartialEq)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize, PartialEq)]
|
||||
pub struct PromSeriesVector {
|
||||
pub metric: HashMap<String, String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
@@ -63,14 +62,14 @@ pub struct PromSeriesVector {
|
||||
}
|
||||
|
||||
/// For [ValueType::Matrix] result type
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, PartialEq)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize, PartialEq)]
|
||||
pub struct PromSeriesMatrix {
|
||||
pub metric: HashMap<String, String>,
|
||||
pub values: Vec<(f64, String)>,
|
||||
}
|
||||
|
||||
/// Variants corresponding to [ValueType]
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema, PartialEq)]
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(untagged)]
|
||||
pub enum PromQueryResult {
|
||||
Matrix(Vec<PromSeriesMatrix>),
|
||||
@@ -85,14 +84,14 @@ impl Default for PromQueryResult {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, PartialEq)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize, PartialEq)]
|
||||
pub struct PromData {
|
||||
#[serde(rename = "resultType")]
|
||||
pub result_type: String,
|
||||
pub result: PromQueryResult,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema, PartialEq)]
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(untagged)]
|
||||
pub enum PrometheusResponse {
|
||||
PromData(PromData),
|
||||
@@ -101,7 +100,6 @@ pub enum PrometheusResponse {
|
||||
LabelValues(Vec<String>),
|
||||
FormatQuery(String),
|
||||
BuildInfo(OwnedBuildInfo),
|
||||
#[schemars(skip)]
|
||||
#[serde(skip_deserializing)]
|
||||
ParseResult(promql_parser::parser::Expr),
|
||||
}
|
||||
@@ -112,7 +110,7 @@ impl Default for PrometheusResponse {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct FormatQuery {
|
||||
query: Option<String>,
|
||||
}
|
||||
@@ -141,7 +139,7 @@ pub async fn format_query(
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct BuildInfoQuery {}
|
||||
|
||||
#[axum_macros::debug_handler]
|
||||
@@ -154,7 +152,7 @@ pub async fn build_info_query() -> PrometheusJsonResponse {
|
||||
PrometheusJsonResponse::success(PrometheusResponse::BuildInfo(build_info.into()))
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct InstantQuery {
|
||||
query: Option<String>,
|
||||
lookback: Option<String>,
|
||||
@@ -209,7 +207,7 @@ pub async fn instant_query(
|
||||
PrometheusJsonResponse::from_query_result(result, metric_name, result_type).await
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct RangeQuery {
|
||||
query: Option<String>,
|
||||
start: Option<String>,
|
||||
@@ -261,10 +259,10 @@ pub async fn range_query(
|
||||
PrometheusJsonResponse::from_query_result(result, metric_name, ValueType::Matrix).await
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Serialize, JsonSchema)]
|
||||
#[derive(Debug, Default, Serialize)]
|
||||
struct Matches(Vec<String>);
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct LabelsQuery {
|
||||
start: Option<String>,
|
||||
end: Option<String>,
|
||||
@@ -663,7 +661,7 @@ fn promql_expr_to_metric_name(expr: &PromqlExpr) -> Option<String> {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct LabelValueQuery {
|
||||
start: Option<String>,
|
||||
end: Option<String>,
|
||||
@@ -927,7 +925,7 @@ fn retrieve_metric_name_from_promql(query: &str) -> Option<String> {
|
||||
visitor.metric_name
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct SeriesQuery {
|
||||
start: Option<String>,
|
||||
end: Option<String>,
|
||||
@@ -1018,7 +1016,7 @@ pub async fn series_query(
|
||||
resp
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct ParseQuery {
|
||||
query: Option<String>,
|
||||
db: Option<String>,
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Redoc</title>
|
||||
<!-- needed for adaptive design -->
|
||||
<meta charset="utf-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||
<link
|
||||
href="https://fonts.googleapis.com/css?family=Montserrat:300,400,700|Roboto:300,400,700"
|
||||
rel="stylesheet"
|
||||
/>
|
||||
|
||||
<!--
|
||||
Redoc doesn't change outer page styles
|
||||
-->
|
||||
<style>
|
||||
body {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<redoc spec-url="/v1/private/api.json"></redoc>
|
||||
<script src="https://cdn.redoc.ly/redoc/latest/bundles/redoc.standalone.js"></script>
|
||||
</body>
|
||||
</html>
|
||||
@@ -24,7 +24,6 @@ use common_error::status_code::StatusCode;
|
||||
use common_query::{Output, OutputData};
|
||||
use common_recordbatch::RecordBatchStream;
|
||||
use futures::StreamExt;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::ResultExt;
|
||||
|
||||
@@ -33,7 +32,7 @@ use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_
|
||||
use crate::http::result::error_result::ErrorResponse;
|
||||
use crate::http::{HttpResponse, ResponseFormat};
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct ArrowResponse {
|
||||
pub(crate) data: Vec<u8>,
|
||||
pub(crate) execution_time_ms: u64,
|
||||
|
||||
@@ -20,7 +20,6 @@ use common_error::status_code::StatusCode;
|
||||
use common_query::Output;
|
||||
use itertools::Itertools;
|
||||
use mime_guess::mime;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
|
||||
@@ -28,7 +27,7 @@ use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_
|
||||
use crate::http::result::error_result::ErrorResponse;
|
||||
use crate::http::{handler, process_with_limit, GreptimeQueryOutput, HttpResponse, ResponseFormat};
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct CsvResponse {
|
||||
output: Vec<GreptimeQueryOutput>,
|
||||
execution_time_ms: u64,
|
||||
|
||||
@@ -18,14 +18,13 @@ use axum::Json;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_telemetry::{debug, error};
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::error::status_code_to_http_status;
|
||||
use crate::http::header::constants::GREPTIME_DB_HEADER_ERROR_CODE;
|
||||
use crate::http::header::GREPTIME_DB_HEADER_EXECUTION_TIME;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct ErrorResponse {
|
||||
code: u32,
|
||||
error: String,
|
||||
|
||||
@@ -16,14 +16,13 @@ use axum::response::IntoResponse;
|
||||
use axum::Json;
|
||||
use http::header::CONTENT_TYPE;
|
||||
use http::HeaderValue;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
|
||||
|
||||
/// Greptimedb Manage Api Response struct
|
||||
/// Currently we have `Pipelines` and `Scripts` as control panel api
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct GreptimedbManageResponse {
|
||||
#[serde(flatten)]
|
||||
pub(crate) manage_result: ManageResult,
|
||||
@@ -57,7 +56,7 @@ impl GreptimedbManageResponse {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
#[serde(untagged)]
|
||||
pub enum ManageResult {
|
||||
Pipelines { pipelines: Vec<PipelineOutput> },
|
||||
@@ -65,7 +64,7 @@ pub enum ManageResult {
|
||||
Scripts(),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct PipelineOutput {
|
||||
name: String,
|
||||
version: String,
|
||||
|
||||
@@ -18,7 +18,6 @@ use axum::headers::HeaderValue;
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use axum::Json;
|
||||
use common_query::Output;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
|
||||
@@ -27,7 +26,7 @@ use crate::http::header::{
|
||||
};
|
||||
use crate::http::{handler, process_with_limit, GreptimeQueryOutput, HttpResponse, ResponseFormat};
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct GreptimedbV1Response {
|
||||
#[serde(skip_serializing_if = "Vec::is_empty", default)]
|
||||
pub(crate) output: Vec<GreptimeQueryOutput>,
|
||||
|
||||
@@ -17,7 +17,6 @@ use axum::response::{IntoResponse, Response};
|
||||
use axum::Json;
|
||||
use common_query::{Output, OutputData};
|
||||
use common_recordbatch::{util, RecordBatch};
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use snafu::ResultExt;
|
||||
@@ -27,7 +26,7 @@ use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_
|
||||
use crate::http::result::error_result::ErrorResponse;
|
||||
use crate::http::{Epoch, HttpResponse, ResponseFormat};
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct SqlQuery {
|
||||
pub db: Option<String>,
|
||||
// Returns epoch timestamps with the specified precision.
|
||||
@@ -37,7 +36,7 @@ pub struct SqlQuery {
|
||||
pub sql: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema, Eq, PartialEq)]
|
||||
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
|
||||
pub struct InfluxdbRecordsOutput {
|
||||
// The SQL query does not return the table name, but in InfluxDB,
|
||||
// we require the table name, so we set it to an empty string “”.
|
||||
@@ -106,7 +105,7 @@ impl TryFrom<(Option<Epoch>, Vec<RecordBatch>)> for InfluxdbRecordsOutput {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema, Eq, PartialEq)]
|
||||
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
|
||||
pub struct InfluxdbOutput {
|
||||
pub statement_id: u32,
|
||||
pub series: Vec<InfluxdbRecordsOutput>,
|
||||
@@ -125,7 +124,7 @@ impl InfluxdbOutput {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct InfluxdbV1Response {
|
||||
results: Vec<InfluxdbOutput>,
|
||||
execution_time_ms: u64,
|
||||
|
||||
@@ -17,7 +17,6 @@ use axum::response::{IntoResponse, Response};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_query::Output;
|
||||
use mime_guess::mime;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{json, Map, Value};
|
||||
|
||||
@@ -27,7 +26,7 @@ use crate::http::{handler, process_with_limit, GreptimeQueryOutput, HttpResponse
|
||||
|
||||
/// The json format here is different from the default json output of `GreptimedbV1` result.
|
||||
/// `JsonResponse` is intended to make it easier for user to consume data.
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct JsonResponse {
|
||||
output: Vec<GreptimeQueryOutput>,
|
||||
execution_time_ms: u64,
|
||||
|
||||
@@ -27,7 +27,6 @@ use datatypes::scalars::ScalarVector;
|
||||
use datatypes::vectors::{Float64Vector, StringVector, TimestampMillisecondVector};
|
||||
use promql_parser::label::METRIC_NAME;
|
||||
use promql_parser::parser::value::ValueType;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
@@ -40,7 +39,7 @@ use crate::http::prometheus::{
|
||||
PromData, PromQueryResult, PromSeriesMatrix, PromSeriesVector, PrometheusResponse,
|
||||
};
|
||||
|
||||
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, PartialEq)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize, PartialEq)]
|
||||
pub struct PrometheusJsonResponse {
|
||||
pub status: String,
|
||||
pub data: PrometheusResponse,
|
||||
|
||||
@@ -21,14 +21,13 @@ use common_error::status_code::StatusCode;
|
||||
use common_query::Output;
|
||||
use itertools::Itertools;
|
||||
use mime_guess::mime;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
|
||||
use crate::http::result::error_result::ErrorResponse;
|
||||
use crate::http::{handler, process_with_limit, GreptimeQueryOutput, HttpResponse, ResponseFormat};
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct TableResponse {
|
||||
output: Vec<GreptimeQueryOutput>,
|
||||
execution_time_ms: u64,
|
||||
|
||||
@@ -20,7 +20,6 @@ use axum::extract::{Query, RawBody, State};
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use session::context::QueryContext;
use snafu::ResultExt;
@@ -96,7 +95,7 @@ pub async fn scripts(
    }
}

#[derive(Debug, Serialize, Deserialize, JsonSchema, Default)]
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct ScriptQuery {
    pub catalog: Option<String>,
    pub db: Option<String>,

@@ -1,46 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use axum::Router;
use common_test_util::ports;
use servers::http::test_helpers::TestClient;
use servers::http::{HttpOptions, HttpServerBuilder};
use table::test_util::MemTable;

use crate::create_testing_sql_query_handler;

fn make_test_app() -> Router {
    let http_opts = HttpOptions {
        addr: format!("127.0.0.1:{}", ports::get_port()),
        ..Default::default()
    };

    let server = HttpServerBuilder::new(http_opts)
        .with_sql_handler(
            create_testing_sql_query_handler(MemTable::default_numbers_table()),
            None,
        )
        .build();
    server.build(server.make_app())
}

#[tokio::test]
async fn test_api_and_doc() {
    let app = make_test_app();
    let client = TestClient::new(app);
    let result = client.get("/v1/private/api.json").send().await;
    assert_eq!(result.status(), 200);
    let result = client.get("/v1/private/docs").send().await;
    assert_eq!(result.status(), 200);
}
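All of the removals above point the same way: the `schemars::JsonSchema` derive, and the deleted test that fetched `/v1/private/api.json` and `/v1/private/docs`, only served the OpenAPI document generated from these response types; once the derive is dropped, the types no longer appear in any generated schema. For context, a minimal sketch of what the derive provides, assuming a hypothetical `ExampleResponse` type (only the `schemars`/`serde_json` calls mirror real crate APIs):

// Hypothetical response type; only the schemars/serde usage reflects the real crates.
use schemars::{schema_for, JsonSchema};
use serde::Serialize;

#[derive(Serialize, JsonSchema)]
pub struct ExampleResponse {
    pub rows: u64,
    pub execution_time_ms: u64,
}

fn main() {
    // `schema_for!` turns the derive into a JSON Schema document, which is what
    // an OpenAPI/doc generator consumes; removing the derive removes the type
    // from that generated document.
    let schema = schema_for!(ExampleResponse);
    println!("{}", serde_json::to_string_pretty(&schema).unwrap());
}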
@@ -14,7 +14,6 @@

mod authorize;
mod http_handler_test;
mod http_test;
mod influxdb_test;
mod opentsdb_test;
mod prom_store_test;

@@ -259,9 +259,17 @@ impl<'a> ParserContext<'a> {

        let flow_name = self.intern_parse_table_name()?;

        self.parser
            .expect_token(&Token::make_keyword(SINK))
            .context(SyntaxSnafu)?;
        // make `SINK` case-insensitive
        if let Token::Word(word) = self.parser.peek_token().token
            && word.value.eq_ignore_ascii_case(SINK)
        {
            self.parser.next_token();
        } else {
            Err(ParserError::ParserError(
                "Expect `SINK` keyword".to_string(),
            ))
            .context(SyntaxSnafu)?
        }
        self.parser
            .expect_keyword(Keyword::TO)
            .context(SyntaxSnafu)?;

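The hunk above swaps an exact `expect_token` on `SINK` for a peek-and-compare using `eq_ignore_ascii_case`, so `SINK`, `sink`, and `SiNk` all parse. A minimal, self-contained sketch of that pattern follows; the `Token` enum and `expect_keyword_ignore_case` helper are illustrative stand-ins, not GreptimeDB's parser API:

// Illustrative sketch only: `Token` and `expect_keyword_ignore_case` are made up,
// not types from GreptimeDB's SQL parser.
#[derive(Debug)]
enum Token {
    Word(String),
}

fn expect_keyword_ignore_case(
    tokens: &mut std::iter::Peekable<std::vec::IntoIter<Token>>,
    keyword: &str,
) -> Result<(), String> {
    match tokens.peek() {
        // Compare the peeked word case-insensitively before consuming it.
        Some(Token::Word(word)) if word.eq_ignore_ascii_case(keyword) => {
            tokens.next();
            Ok(())
        }
        other => Err(format!("Expect `{keyword}` keyword, got {other:?}")),
    }
}

fn main() {
    let mut tokens = vec![Token::Word("SiNk".to_string()), Token::Word("TO".to_string())]
        .into_iter()
        .peekable();
    // "SINK", "sink", and "SiNk" are all accepted.
    assert!(expect_keyword_ignore_case(&mut tokens, "SINK").is_ok());
    assert!(expect_keyword_ignore_case(&mut tokens, "TO").is_ok());
}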
@@ -20,7 +20,7 @@ use std::time::Duration;
use api::v1::region::region_server::RegionServer;
use arrow_flight::flight_service_server::FlightServiceServer;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use client::client_manager::NodeClients;
use client::Client;
use cmd::DistributedInformationExtension;
@@ -351,8 +351,9 @@ impl GreptimeDbClusterBuilder {
        meta_client.start(&[&metasrv.server_addr]).await.unwrap();
        let meta_client = Arc::new(meta_client);

        let cached_meta_backend =
            Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
        let cached_meta_backend = Arc::new(
            CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone()))).build(),
        );

        let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
            CacheRegistryBuilder::default()

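The builder change above replaces the meta-specific `CachedMetaKvBackendBuilder` with a generic `CachedKvBackendBuilder` that simply wraps an inner `MetaKvBackend`. A self-contained sketch of that decorator shape, with made-up trait and type names rather than the real `catalog::kvbackend` API:

// All names below are hypothetical; this only illustrates the "generic cache
// wrapping an arbitrary backend" pattern used in the hunk above.
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

trait KvBackend: Send + Sync {
    fn get(&self, key: &str) -> Option<String>;
}

struct MetaBackend; // stand-in for a backend that talks to the metasrv

impl KvBackend for MetaBackend {
    fn get(&self, key: &str) -> Option<String> {
        Some(format!("value-for-{key}"))
    }
}

// Generic read-through cache over *any* KvBackend.
struct CachedKvBackend {
    inner: Arc<dyn KvBackend>,
    cache: Mutex<HashMap<String, String>>,
}

impl CachedKvBackend {
    fn new(inner: Arc<dyn KvBackend>) -> Self {
        Self { inner, cache: Mutex::new(HashMap::new()) }
    }
}

impl KvBackend for CachedKvBackend {
    fn get(&self, key: &str) -> Option<String> {
        // Serve from the cache when possible, otherwise fall through and fill it.
        if let Some(v) = self.cache.lock().unwrap().get(key) {
            return Some(v.clone());
        }
        let v = self.inner.get(key)?;
        self.cache.lock().unwrap().insert(key.to_string(), v.clone());
        Some(v)
    }
}

fn main() {
    let cached = CachedKvBackend::new(Arc::new(MetaBackend));
    assert_eq!(cached.get("table/1"), Some("value-for-table/1".to_string()));
}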
@@ -799,6 +799,28 @@ pub async fn test_config_api(store_type: StorageType) {
    let res_get = client.get("/config").send().await;
    assert_eq!(res_get.status(), StatusCode::OK);

    let storage = if store_type != StorageType::File {
        format!(
            r#"[storage]
type = "{}"
providers = []

[storage.http_client]
pool_max_idle_per_host = 1024
connect_timeout = "30s"
timeout = "30s"
pool_idle_timeout = "1m 30s""#,
            store_type
        )
    } else {
        format!(
            r#"[storage]
type = "{}"
providers = []"#,
            store_type
        )
    };

    let expected_toml_str = format!(
        r#"
mode = "standalone"
@@ -867,9 +889,7 @@ sync_write = false
enable_log_recycle = true
prefill_log_files = false

[storage]
type = "{}"
providers = []
{storage}

[metadata_store]
file_size = "256MiB"
@@ -933,7 +953,6 @@ enable = false
write_interval = "30s"

[tracing]"#,
        store_type
    )
    .trim()
    .to_string();

@@ -281,3 +281,54 @@ DROP TABLE ato;

Affected Rows: 0

CREATE TABLE phy (ts timestamp time index, val double) engine=metric with ("physical_metric_table" = "");

Affected Rows: 0

ALTER TABLE phy set ttl='2years';

Affected Rows: 0

SHOW CREATE TABLE phy;

+-------+------------------------------------+
| Table | Create Table                       |
+-------+------------------------------------+
| phy   | CREATE TABLE IF NOT EXISTS "phy" ( |
|       |   "ts" TIMESTAMP(3) NOT NULL,      |
|       |   "val" DOUBLE NULL,               |
|       |   TIME INDEX ("ts")                |
|       | )                                  |
|       |                                    |
|       | ENGINE=metric                      |
|       | WITH(                              |
|       |   physical_metric_table = '',      |
|       |   ttl = '2years'                   |
|       | )                                  |
+-------+------------------------------------+

ALTER TABLE phy UNSET 'ttl';

Affected Rows: 0

SHOW CREATE TABLE phy;

+-------+------------------------------------+
| Table | Create Table                       |
+-------+------------------------------------+
| phy   | CREATE TABLE IF NOT EXISTS "phy" ( |
|       |   "ts" TIMESTAMP(3) NOT NULL,      |
|       |   "val" DOUBLE NULL,               |
|       |   TIME INDEX ("ts")                |
|       | )                                  |
|       |                                    |
|       | ENGINE=metric                      |
|       | WITH(                              |
|       |   physical_metric_table = ''       |
|       | )                                  |
+-------+------------------------------------+

DROP TABLE phy;

Affected Rows: 0

@@ -60,3 +60,15 @@ SHOW CREATE TABLE ato;
SHOW CREATE TABLE ato;

DROP TABLE ato;

CREATE TABLE phy (ts timestamp time index, val double) engine=metric with ("physical_metric_table" = "");

ALTER TABLE phy set ttl='2years';

SHOW CREATE TABLE phy;

ALTER TABLE phy UNSET 'ttl';

SHOW CREATE TABLE phy;

DROP TABLE phy;

@@ -112,6 +112,73 @@ DROP TABLE out_num_cnt_basic;

Affected Rows: 0

-- test count(*) rewrite
CREATE TABLE input_basic (
    number INT,
    ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY(number),
    TIME INDEX(ts)
);

Affected Rows: 0

CREATE FLOW test_wildcard_basic SiNk TO out_basic AS
SELECT
    COUNT(*) as wildcard
FROM
    input_basic;

Affected Rows: 0

DROP FLOW test_wildcard_basic;

Affected Rows: 0

CREATE FLOW test_wildcard_basic sink TO out_basic AS
SELECT
    COUNT(*) as wildcard
FROM
    input_basic;

Affected Rows: 0

INSERT INTO
    input_basic
VALUES
    (23, "2021-07-01 00:00:01.000"),
    (24, "2021-07-01 00:00:01.500");

Affected Rows: 2

-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('test_wildcard_basic');

+-----------------------------------------+
| ADMIN FLUSH_FLOW('test_wildcard_basic') |
+-----------------------------------------+
| FLOW_FLUSHED |
+-----------------------------------------+

SELECT wildcard FROM out_basic;

+----------+
| wildcard |
+----------+
|        2 |
+----------+

DROP FLOW test_wildcard_basic;

Affected Rows: 0

DROP TABLE out_basic;

Affected Rows: 0

DROP TABLE input_basic;

Affected Rows: 0

-- test distinct
CREATE TABLE distinct_basic (
    number INT,

@@ -61,6 +61,43 @@ DROP TABLE numbers_input_basic;

DROP TABLE out_num_cnt_basic;

-- test count(*) rewrite
CREATE TABLE input_basic (
    number INT,
    ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY(number),
    TIME INDEX(ts)
);

CREATE FLOW test_wildcard_basic SiNk TO out_basic AS
SELECT
    COUNT(*) as wildcard
FROM
    input_basic;

DROP FLOW test_wildcard_basic;

CREATE FLOW test_wildcard_basic sink TO out_basic AS
SELECT
    COUNT(*) as wildcard
FROM
    input_basic;

INSERT INTO
    input_basic
VALUES
    (23, "2021-07-01 00:00:01.000"),
    (24, "2021-07-01 00:00:01.500");

-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('test_wildcard_basic');

SELECT wildcard FROM out_basic;

DROP FLOW test_wildcard_basic;
DROP TABLE out_basic;
DROP TABLE input_basic;

-- test distinct
CREATE TABLE distinct_basic (
    number INT,

@@ -11,5 +11,6 @@ extend-exclude = [
    "tests-fuzz/src/data/lorem_words",
    "*.sql",
    "*.result",
    "src/pipeline/benches/data.log"
    "src/pipeline/benches/data.log",
    "cyborg/pnpm-lock.yaml"
]