Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-26 08:00:01 +00:00)

Compare commits (20 commits)
| SHA1 |
| --- |
| ed676d97c7 |
| 14b2badded |
| 3626a50395 |
| 0d0dad4ba2 |
| ae00e28b2a |
| 92d2fafb33 |
| 30b3600597 |
| 87f1a8c622 |
| 8e815fc385 |
| ca46bd04ee |
| d32ade7399 |
| b4aa0c8b8b |
| e647559d27 |
| d2c4767d41 |
| 82cee11eea |
| 6d0470c3fb |
| 47a267e29c |
| fa13d06fc6 |
| 26d9517c3e |
| a7da9af5de |
.coderabbit.yaml | 15 (new file)
@@ -0,0 +1,15 @@
+# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
+language: "en-US"
+early_access: false
+reviews:
+  profile: "chill"
+  request_changes_workflow: false
+  high_level_summary: true
+  poem: true
+  review_status: true
+  collapse_walkthrough: false
+  auto_review:
+    enabled: false
+    drafts: false
+chat:
+  auto_reply: true
Cargo.lock | 87 (generated)
@@ -1619,9 +1619,9 @@ dependencies = [

 [[package]]
 name = "cc"
-version = "1.2.20"
+version = "1.1.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04da6a0d40b948dfc4fa8f5bbf402b0fc1a64a28dbf7d12ffd683550f2c1b63a"
+checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938"
 dependencies = [
 "jobserver",
 "libc",
@@ -2510,7 +2510,7 @@ dependencies = [
 "futures-util",
 "serde",
 "snafu 0.8.5",
-"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
+"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
 "sqlparser_derive 0.1.1",
 "statrs",
 "store-api",
@@ -2946,9 +2946,9 @@ dependencies = [

 [[package]]
 name = "crossbeam-channel"
-version = "0.5.15"
+version = "0.5.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
+checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
 dependencies = [
 "crossbeam-utils",
 ]
@@ -3117,7 +3117,7 @@ checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
 [[package]]
 name = "datafusion"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "arrow 54.2.1",
 "arrow-array 54.2.1",
@@ -3168,7 +3168,7 @@ dependencies = [
 [[package]]
 name = "datafusion-catalog"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "arrow 54.2.1",
 "async-trait",
@@ -3188,7 +3188,7 @@ dependencies = [
 [[package]]
 name = "datafusion-catalog-listing"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "arrow 54.2.1",
 "arrow-schema 54.3.1",
@@ -3211,7 +3211,7 @@ dependencies = [
 [[package]]
 name = "datafusion-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "ahash 0.8.11",
 "arrow 54.2.1",
@@ -3236,7 +3236,7 @@ dependencies = [
 [[package]]
 name = "datafusion-common-runtime"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "log",
 "tokio",
@@ -3245,12 +3245,12 @@ dependencies = [
 [[package]]
 name = "datafusion-doc"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"

 [[package]]
 name = "datafusion-execution"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "arrow 54.2.1",
 "dashmap",
@@ -3268,7 +3268,7 @@ dependencies = [
 [[package]]
 name = "datafusion-expr"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "arrow 54.2.1",
 "chrono",
@@ -3288,7 +3288,7 @@ dependencies = [
 [[package]]
 name = "datafusion-expr-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "arrow 54.2.1",
 "datafusion-common",
@@ -3299,7 +3299,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "arrow 54.2.1",
 "arrow-buffer 54.3.1",
@@ -3328,7 +3328,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-aggregate"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "ahash 0.8.11",
 "arrow 54.2.1",
@@ -3349,7 +3349,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-aggregate-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "ahash 0.8.11",
 "arrow 54.2.1",
@@ -3361,7 +3361,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-nested"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "arrow 54.2.1",
 "arrow-array 54.2.1",
@@ -3383,7 +3383,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-table"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "arrow 54.2.1",
 "async-trait",
@@ -3398,7 +3398,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-window"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "datafusion-common",
 "datafusion-doc",
@@ -3414,7 +3414,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-window-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "datafusion-common",
 "datafusion-physical-expr-common",
@@ -3423,7 +3423,7 @@ dependencies = [
 [[package]]
 name = "datafusion-macros"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "datafusion-expr",
 "quote",
@@ -3433,7 +3433,7 @@ dependencies = [
 [[package]]
 name = "datafusion-optimizer"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "arrow 54.2.1",
 "chrono",
@@ -3451,7 +3451,7 @@ dependencies = [
 [[package]]
 name = "datafusion-physical-expr"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "ahash 0.8.11",
 "arrow 54.2.1",
@@ -3474,7 +3474,7 @@ dependencies = [
 [[package]]
 name = "datafusion-physical-expr-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "ahash 0.8.11",
 "arrow 54.2.1",
@@ -3487,7 +3487,7 @@ dependencies = [
 [[package]]
 name = "datafusion-physical-optimizer"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "arrow 54.2.1",
 "arrow-schema 54.3.1",
@@ -3508,7 +3508,7 @@ dependencies = [
 [[package]]
 name = "datafusion-physical-plan"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "ahash 0.8.11",
 "arrow 54.2.1",
@@ -3538,7 +3538,7 @@ dependencies = [
 [[package]]
 name = "datafusion-sql"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "arrow 54.2.1",
 "arrow-array 54.2.1",
@@ -3556,7 +3556,7 @@ dependencies = [
 [[package]]
 name = "datafusion-substrait"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
 dependencies = [
 "async-recursion",
 "async-trait",
@@ -3656,7 +3656,7 @@ dependencies = [
 "serde",
 "serde_json",
 "snafu 0.8.5",
-"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
+"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
 "sqlparser_derive 0.1.1",
 ]

@@ -4553,7 +4553,7 @@ dependencies = [
 "session",
 "snafu 0.8.5",
 "sql",
-"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
+"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
 "store-api",
 "strfmt",
 "substrait 0.14.0",
@@ -6509,7 +6509,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4"
 dependencies = [
 "cfg-if",
-"windows-targets 0.48.5",
+"windows-targets 0.52.6",
 ]

 [[package]]
@@ -7041,14 +7041,12 @@
 "common-macro",
 "common-query",
 "common-recordbatch",
 "common-runtime",
 "common-telemetry",
 "common-test-util",
 "common-time",
 "datafusion",
 "datatypes",
 "futures-util",
 "humantime-serde",
 "itertools 0.14.0",
 "lazy_static",
 "mito2",
@@ -8166,7 +8164,7 @@ dependencies = [
 "session",
 "snafu 0.8.5",
 "sql",
-"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
+"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
 "store-api",
 "substrait 0.14.0",
 "table",
@@ -8443,7 +8441,7 @@ dependencies = [
 "session",
 "snafu 0.8.5",
 "sql",
-"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
+"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
 "store-api",
 "table",
 ]
@@ -9477,7 +9475,7 @@ dependencies = [
 "session",
 "snafu 0.8.5",
 "sql",
-"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
+"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
 "statrs",
 "store-api",
 "substrait 0.14.0",
@@ -10005,14 +10003,15 @@ dependencies = [

 [[package]]
 name = "ring"
-version = "0.17.14"
+version = "0.17.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7"
+checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d"
 dependencies = [
 "cc",
 "cfg-if",
 "getrandom 0.2.15",
 "libc",
+"spin",
 "untrusted",
 "windows-sys 0.52.0",
 ]
@@ -11303,7 +11302,7 @@ dependencies = [
 "serde",
 "serde_json",
 "snafu 0.8.5",
-"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
+"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
 "sqlparser_derive 0.1.1",
 "store-api",
 "table",
@@ -11372,7 +11371,7 @@ dependencies = [
 [[package]]
 name = "sqlparser"
 version = "0.54.0"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e#0cf6c04490d59435ee965edd2078e8855bd8471e"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089#e98e6b322426a9d397a71efef17075966223c089"
 dependencies = [
 "lazy_static",
 "log",
@@ -11380,7 +11379,7 @@ dependencies = [
 "regex",
 "serde",
 "sqlparser 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"sqlparser_derive 0.3.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
+"sqlparser_derive 0.3.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
 ]

 [[package]]
@@ -11408,7 +11407,7 @@ dependencies = [
 [[package]]
 name = "sqlparser_derive"
 version = "0.3.0"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e#0cf6c04490d59435ee965edd2078e8855bd8471e"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089#e98e6b322426a9d397a71efef17075966223c089"
 dependencies = [
 "proc-macro2",
 "quote",
@@ -12263,7 +12262,7 @@ dependencies = [
 "serde_yaml",
 "snafu 0.8.5",
 "sql",
-"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
+"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
 "sqlx",
 "store-api",
 "strum 0.27.1",
Cargo.toml | 20
@@ -112,15 +112,15 @@ clap = { version = "4.4", features = ["derive"] }
 config = "0.13.0"
 crossbeam-utils = "0.8"
 dashmap = "6.1"
-datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
 deadpool = "0.12"
 deadpool-postgres = "0.14"
 derive_builder = "0.20"
@@ -191,7 +191,7 @@ simd-json = "0.15"
 similar-asserts = "1.6.0"
 smallvec = { version = "1", features = ["serde"] }
 snafu = "0.8"
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0cf6c04490d59435ee965edd2078e8855bd8471e", features = [
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e98e6b322426a9d397a71efef17075966223c089", features = [
 "visitor",
 "serde",
 ] } # branch = "v0.54.x"
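The two Cargo.toml hunks above swap the pinned git revisions for the `waynexia/arrow-datafusion` fork and for `GreptimeTeam/sqlparser-rs`. These entries appear to live in the root workspace manifest, so member crates pick up the new revisions automatically once they inherit the dependency from the workspace. A minimal, hypothetical sketch of such a member-crate manifest (the section below is illustrative and not part of this compare):

```toml
# Hypothetical member-crate Cargo.toml: inherit the git revisions pinned in the
# root manifest instead of repeating the rev in every crate, so a rev bump like
# the hunk above stays a one-line-per-dependency change in one file.
[dependencies]
datafusion = { workspace = true }
datafusion-expr = { workspace = true }
sqlparser = { workspace = true }
```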
@@ -319,7 +319,6 @@
 | `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
 | `use_memory_store` | Bool | `false` | Store data in memory. |
 | `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
-| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
 | `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
 | `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
 | `runtime` | -- | -- | The runtime options. |
@@ -50,10 +50,6 @@ use_memory_store = false
 ## - Using shared storage (e.g., s3).
 enable_region_failover = false

-## Whether to allow region failover on local WAL.
-## **This option is not recommended to be set to true, because it may lead to data loss during failover.**
-allow_region_failover_on_local_wal = false
-
 ## Max allowed idle time before removing node info from metasrv memory.
 node_max_idle_time = "24hours"

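The option table and the example config above both describe the metasrv failover settings, with the `allow_region_failover_on_local_wal` entry dropped on one side of the compare. A minimal sketch of the remaining block using the documented defaults (values come from the table above, not from this compare):

```toml
# Metasrv snippet with the defaults documented above. Region failover
# additionally requires cluster mode, remote WAL and shared storage
# (e.g. s3), per the option description.
use_memory_store = false
enable_region_failover = false
node_max_idle_time = "24hours"
enable_telemetry = true
```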
@@ -4,21 +4,15 @@

 This repository maintains the Grafana dashboards for GreptimeDB. It has two types of dashboards:

-- `cluster/dashboard.json`: The Grafana dashboard for the GreptimeDB cluster. Read the [dashboard.md](./dashboards/cluster/dashboard.md) for more details.
-- `standalone/dashboard.json`: The Grafana dashboard for the standalone GreptimeDB instance. **It's generated from the `cluster/dashboard.json` by removing the instance filter through the `make dashboards` command**. Read the [dashboard.md](./dashboards/standalone/dashboard.md) for more details.
+- `cluster/`: The dashboard for the GreptimeDB cluster. Read the [dashboard.md](./dashboards/cluster/dashboard.md) for more details.
+- `standalone/`: The dashboard for the standalone GreptimeDB instance. Read the [dashboard.md](./dashboards/standalone/dashboard.md) for more details.

 As the rapid development of GreptimeDB, the metrics may be changed, and please feel free to submit your feedback and/or contribution to this dashboard 🤗

-**NOTE**:
-
-- The Grafana version should be greater than 9.0.
-
-- If you want to modify the dashboards, you only need to modify the `cluster/dashboard.json` and run the `make dashboards` command to generate the `standalone/dashboard.json` and other related files.
-
-To maintain the dashboards easily, we use the [`dac`](https://github.com/zyy17/dac) tool to generate the intermediate dashboards and markdown documents:
+To maintain the dashboards, we use the [`dac`](https://github.com/zyy17/dac) tool to generate the intermediate dashboards and markdown documents:

 - `cluster/dashboard.yaml`: The intermediate dashboard for the GreptimeDB cluster.
-- `standalone/dashboard.yaml`: The intermediate dashboard for the standalone GreptimeDB instance.
+- `standalone/dashboard.yaml`: The intermediate dashboard for the standalone GreptimeDB instance.

 ## Data Sources

File diff suppressed because it is too large.
@@ -1,97 +1,96 @@
 # Overview
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
-| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `prometheus` | `s` | `__auto` |
-| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | `mysql` | -- | -- |
-| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `prometheus` | `rowsps` | `__auto` |
-| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `mysql` | `decbytes` | -- |
-| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `mysql` | `sishort` | -- |
-| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | `mysql` | -- | -- |
-| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | `mysql` | -- | -- |
-| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `mysql` | `decbytes` | -- |
+| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `s` | `prometheus` | `__auto` |
+| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | -- | `mysql` | -- |
+| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `rowsps` | `prometheus` | `__auto` |
+| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `decbytes` | `mysql` | -- |
+| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `sishort` | `mysql` | -- |
+| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | -- | `mysql` | -- |
+| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | -- | `mysql` | -- |
+| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `decbytes` | `mysql` | -- |
 # Ingestion
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
-| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `ingestion` |
-| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `http-logs` |
+| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `rowsps` | `prometheus` | `ingestion` |
+| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `rowsps` | `prometheus` | `http-logs` |
 # Queries
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
-| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_counte{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that there are some other minor query APIs like /sql are not included | `prometheus` | `reqps` | `mysql` |
+| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_counte{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that there are some other minor query APIs like /sql are not included | `reqps` | `prometheus` | `mysql` |
 # Resources
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
-| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
-| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
-| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
-| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
-| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
-| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
-| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
-| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
+| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{instance}}]-[{{ pod }}]` |
+| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
+| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
+| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-cpu` |
+| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-resident` |
+| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
+| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
+| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
 # Frontend Requests
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
-| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
-| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
-| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
-| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
-| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
-| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `prometheus` | `s` | `[{{ instance }}]-[{{ pod }}]-p99` |
-| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
-| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
+| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
+| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{instance=~"$frontend",path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
+| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
+| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
+| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
+| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `s` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-p99` |
+| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
+| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-p99` |
 # Frontend to Datanode
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
-| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
-| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
-| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
+| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `rowsps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
+| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{instance=~"$frontend"}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
+| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{instance=~"$frontend"}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
 # Mito Engine
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
-| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
-| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
-| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{instance=~"$datanode"}` | `timeseries` | Write Buffer per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]` |
-| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
-| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
-| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{instance=~"$datanode"})` | `timeseries` | Write Stall per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
-| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{instance=~"$datanode", stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]` |
-| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
-| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
-| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
-| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
-| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
-| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
-| Cached Bytes per Instance | `greptime_mito_cache_bytes{instance=~"$datanode"}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
-| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
-| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
-| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
-| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
+| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
+| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
+| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{instance=~"$datanode"}` | `timeseries` | Write Buffer per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
+| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `rowsps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
+| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
+| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{instance=~"$datanode"})` | `timeseries` | Write Stall per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
+| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{instance=~"$datanode", stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
+| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
+| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
+| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `ops` | `prometheus` | `[{{ instance }}]-[{{pod}}]` |
+| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
+| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~"$datanode"}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
+| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `bytes` | `prometheus` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
+| Cached Bytes per Instance | `greptime_mito_cache_bytes{instance=~"$datanode"}` | `timeseries` | Cached Bytes per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
+| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `none` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
+| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-p99` |
+| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
+| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `none` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
# OpenDAL

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode",operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{instance=~"$datanode", error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode",operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode", operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode",operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |

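One detail worth noting: the `\|` sequences in the "Other Requests" queries above are only markdown escaping inside the table cells; the actual PromQL label matcher uses plain `|` alternation. A hypothetical panel entry for that row, under the same assumed YAML layout as the sketch above, could look like:

```yaml
# Hypothetical panel entry; the regex matcher is written with unescaped '|' as it appears in PromQL.
- title: Other Requests per Instance
  type: timeseries
  description: Other Requests per Instance.
  unit: ops
  queries:
    - expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{instance=~"$datanode",operation!~"read|write|list|stat"}[$__rate_interval]))
      datasource:
        type: prometheus
        uid: ${metrics}
      legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
```
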
# Metasrv

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `none` | `prometheus` | `from-datanode-{{datanode_id}}` |
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `none` | `prometheus` | `__auto` |
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `none` | `prometheus` | `__auto` |

# Flownode

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | `prometheus` | -- | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-p95` |
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{code}}]` |
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | -- | `prometheus` | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-p95` |
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]` |
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{code}}]` |

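Rows whose Query cell joins several expressions with `<br/>`, such as "Flow Ingest Latency" above, correspond to panels with multiple `expr` entries. A hypothetical sketch under the same assumed YAML layout follows; note the `-p99` legend is inferred from the `-p95` pattern and is not taken from the source.

```yaml
# Hypothetical multi-query panel; only the p95 legend appears in the table, the p99 legend is assumed.
- title: Flow Ingest Latency
  type: timeseries
  description: Flow Ingest Latency.
  queries:
    - expr: histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))
      datasource:
        type: prometheus
        uid: ${metrics}
      legendFormat: '[{{instance}}]-[{{pod}}]-p95'
    - expr: histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))
      datasource:
        type: prometheus
        uid: ${metrics}
      legendFormat: '[{{instance}}]-[{{pod}}]-p99'
```
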
@@ -426,6 +426,7 @@ groups:
- title: Write Stall per Instance
type: timeseries
description: Write Stall per Instance.
unit: decbytes
queries:
- expr: sum by(instance, pod) (greptime_mito_write_stall_total{instance=~"$datanode"})
datasource:
@@ -657,22 +658,13 @@ groups:
- title: Opendal traffic
type: timeseries
description: Total traffic as in bytes by instance and operation
unit: decbytes
unit: ops
queries:
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
- title: OpenDAL errors per Instance
type: timeseries
description: OpenDAL error counts per Instance.
queries:
- expr: sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{instance=~"$datanode", error!="NotFound"}[$__rate_interval]))
datasource:
type: prometheus
uid: ${metrics}
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
- title: Metasrv
panels:
- title: Region migration datanode
File diff suppressed because it is too large
@@ -1,97 +1,96 @@
|
||||
# Overview
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `prometheus` | `s` | `__auto` |
|
||||
| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | `mysql` | -- | -- |
|
||||
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `prometheus` | `rowsps` | `__auto` |
|
||||
| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `mysql` | `decbytes` | -- |
|
||||
| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `mysql` | `sishort` | -- |
|
||||
| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | `mysql` | -- | -- |
|
||||
| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | `mysql` | -- | -- |
|
||||
| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `mysql` | `decbytes` | -- |
|
||||
| Uptime | `time() - process_start_time_seconds` | `stat` | The start time of GreptimeDB. | `s` | `prometheus` | `__auto` |
|
||||
| Version | `SELECT pkg_version FROM information_schema.build_info` | `stat` | GreptimeDB version. | -- | `mysql` | -- |
|
||||
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows[$__rate_interval]))` | `stat` | Total ingestion rate. | `rowsps` | `prometheus` | `__auto` |
|
||||
| Total Storage Size | `select SUM(disk_size) from information_schema.region_statistics;` | `stat` | Total number of data file size. | `decbytes` | `mysql` | -- |
|
||||
| Total Rows | `select SUM(region_rows) from information_schema.region_statistics;` | `stat` | Total number of data rows in the cluster. Calculated by sum of rows from each region. | `sishort` | `mysql` | -- |
|
||||
| Deployment | `SELECT count(*) as datanode FROM information_schema.cluster_info WHERE peer_type = 'DATANODE';`<br/>`SELECT count(*) as frontend FROM information_schema.cluster_info WHERE peer_type = 'FRONTEND';`<br/>`SELECT count(*) as metasrv FROM information_schema.cluster_info WHERE peer_type = 'METASRV';`<br/>`SELECT count(*) as flownode FROM information_schema.cluster_info WHERE peer_type = 'FLOWNODE';` | `stat` | The deployment topology of GreptimeDB. | -- | `mysql` | -- |
|
||||
| Database Resources | `SELECT COUNT(*) as databases FROM information_schema.schemata WHERE schema_name NOT IN ('greptime_private', 'information_schema')`<br/>`SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema != 'information_schema'`<br/>`SELECT COUNT(region_id) as regions FROM information_schema.region_peers`<br/>`SELECT COUNT(*) as flows FROM information_schema.flows` | `stat` | The number of the key resources in GreptimeDB. | -- | `mysql` | -- |
|
||||
| Data Size | `SELECT SUM(memtable_size) * 0.42825 as WAL FROM information_schema.region_statistics;`<br/>`SELECT SUM(index_size) as index FROM information_schema.region_statistics;`<br/>`SELECT SUM(manifest_size) as manifest FROM information_schema.region_statistics;` | `stat` | The data size of wal/index/manifest in the GreptimeDB. | `decbytes` | `mysql` | -- |
|
||||
# Ingestion
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `ingestion` |
|
||||
| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `prometheus` | `rowsps` | `http-logs` |
|
||||
| Total Ingestion Rate | `sum(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `rowsps` | `prometheus` | `ingestion` |
|
||||
| Ingestion Rate by Type | `sum(rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))`<br/>`sum(rate(greptime_servers_prometheus_remote_write_samples[$__rate_interval]))` | `timeseries` | Total ingestion rate.<br/><br/>Here we listed 3 primary protocols:<br/><br/>- Prometheus remote write<br/>- Greptime's gRPC API (when using our ingest SDK)<br/>- Log ingestion http API<br/> | `rowsps` | `prometheus` | `http-logs` |
|
||||
# Queries
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_counte{}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that there are some other minor query APIs like /sql are not included | `prometheus` | `reqps` | `mysql` |
|
||||
| Total Query Rate | `sum (rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))`<br/>`sum (rate(greptime_servers_http_promql_elapsed_counte{}[$__rate_interval]))` | `timeseries` | Total rate of query API calls by protocol. This metric is collected from frontends.<br/><br/>Here we listed 3 main protocols:<br/>- MySQL<br/>- Postgres<br/>- Prometheus API<br/><br/>Note that there are some other minor query APIs like /sql are not included | `reqps` | `prometheus` | `mysql` |
|
||||
# Resources
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `prometheus` | `decbytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)` | `timeseries` | Current memory usage by instance | `decbytes` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)` | `timeseries` | Current cpu usage by instance | `none` | `prometheus` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
# Frontend Requests
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
|
||||
| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
|
||||
| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
|
||||
| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
|
||||
| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
|
||||
| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `prometheus` | `s` | `[{{ instance }}]-[{{ pod }}]-p99` |
|
||||
| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `prometheus` | `reqps` | `[{{instance}}]-[{{pod}}]` |
|
||||
| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
|
||||
| HTTP QPS per Instance | `sum by(instance, pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{path!~"/health\|/metrics"}[$__rate_interval]))` | `timeseries` | HTTP QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]` |
|
||||
| HTTP P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{path!~"/health\|/metrics"}[$__rate_interval])))` | `timeseries` | HTTP P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
|
||||
| gRPC QPS per Instance | `sum by(instance, pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{}[$__rate_interval]))` | `timeseries` | gRPC QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{code}}]` |
|
||||
| gRPC P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | gRPC P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99` |
|
||||
| MySQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_mysql_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | MySQL QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
||||
| MySQL P99 per Instance | `histogram_quantile(0.99, sum by(pod, instance, le) (rate(greptime_servers_mysql_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | MySQL P99 per Instance. | `s` | `prometheus` | `[{{ instance }}]-[{{ pod }}]-p99` |
|
||||
| PostgreSQL QPS per Instance | `sum by(pod, instance)(rate(greptime_servers_postgres_query_elapsed_count{}[$__rate_interval]))` | `timeseries` | PostgreSQL QPS per Instance. | `reqps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
||||
| PostgreSQL P99 per Instance | `histogram_quantile(0.99, sum by(pod,instance,le) (rate(greptime_servers_postgres_query_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | PostgreSQL P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-p99` |
|
||||
# Frontend to Datanode
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
||||
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
||||
| Ingest Rows per Instance | `sum by(instance, pod)(rate(greptime_table_operator_ingest_rows{}[$__rate_interval]))` | `timeseries` | Ingestion rate by row as in each frontend | `rowsps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Region Call QPS per Instance | `sum by(instance, pod, request_type) (rate(greptime_grpc_region_request_count{}[$__rate_interval]))` | `timeseries` | Region Call QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
||||
| Region Call P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, request_type) (rate(greptime_grpc_region_request_bucket{}[$__rate_interval])))` | `timeseries` | Region Call P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{request_type}}]` |
|
||||
# Mito Engine
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{}` | `timeseries` | Write Buffer per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `prometheus` | `rowsps` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
|
||||
| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{})` | `timeseries` | Write Stall per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
|
||||
| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{ stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `prometheus` | `ops` | `[{{ instance }}]-[{{pod}}]` |
|
||||
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
||||
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
|
||||
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
|
||||
| Cached Bytes per Instance | `greptime_mito_cache_bytes{}` | `timeseries` | Cached Bytes per Instance. | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
|
||||
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-p99` |
|
||||
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
|
||||
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Request OPS per Instance | `sum by(instance, pod, type) (rate(greptime_mito_handle_request_elapsed_count{}[$__rate_interval]))` | `timeseries` | Request QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
| Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Request P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
| Write Buffer per Instance | `greptime_mito_write_buffer_bytes{}` | `timeseries` | Write Buffer per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Write Rows per Instance | `sum by (instance, pod) (rate(greptime_mito_write_rows_total{}[$__rate_interval]))` | `timeseries` | Ingestion size by row counts. | `rowsps` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Flush OPS per Instance | `sum by(instance, pod, reason) (rate(greptime_mito_flush_requests_total{}[$__rate_interval]))` | `timeseries` | Flush QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{reason}}]` |
|
||||
| Write Stall per Instance | `sum by(instance, pod) (greptime_mito_write_stall_total{})` | `timeseries` | Write Stall per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Read Stage OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_read_stage_elapsed_count{ stage="total"}[$__rate_interval]))` | `timeseries` | Read Stage OPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Read Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Read Stage P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||
| Write Stage P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Write Stage P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]` |
|
||||
| Compaction OPS per Instance | `sum by(instance, pod) (rate(greptime_mito_compaction_total_elapsed_count{}[$__rate_interval]))` | `timeseries` | Compaction OPS per Instance. | `ops` | `prometheus` | `[{{ instance }}]-[{{pod}}]` |
|
||||
| Compaction P99 per Instance by Stage | `histogram_quantile(0.99, sum by(instance, pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction latency by stage | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-p99` |
|
||||
| Compaction P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le,stage) (rate(greptime_mito_compaction_total_elapsed_bucket{}[$__rate_interval])))` | `timeseries` | Compaction P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-compaction` |
|
||||
| WAL write size | `histogram_quantile(0.95, sum by(le,instance, pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`histogram_quantile(0.99, sum by(le,instance,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))`<br/>`sum by (instance, pod)(rate(raft_engine_write_size_sum[$__rate_interval]))` | `timeseries` | Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate. | `bytes` | `prometheus` | `[{{instance}}]-[{{pod}}]-req-size-p95` |
|
||||
| Cached Bytes per Instance | `greptime_mito_cache_bytes{}` | `timeseries` | Cached Bytes per Instance. | `decbytes` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]` |
|
||||
| Inflight Compaction | `greptime_mito_inflight_compaction_count` | `timeseries` | Ongoing compaction task count | `none` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
||||
| WAL sync duration seconds | `histogram_quantile(0.99, sum by(le, type, node, instance, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))` | `timeseries` | Raft engine (local disk) log store sync latency, p99 | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-p99` |
|
||||
| Log Store op duration seconds | `histogram_quantile(0.99, sum by(le,logstore,optype,instance, pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))` | `timeseries` | Write-ahead log operations latency at p99 | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{logstore}}]-[{{optype}}]-p99` |
|
||||
| Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `none` | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
||||
# OpenDAL
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `prometheus` | `ops` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{ error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
|
||||
| QPS per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{}[$__rate_interval]))` | `timeseries` | QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Read QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="read"}[$__rate_interval]))` | `timeseries` | Read QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Read P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{operation="read"}[$__rate_interval])))` | `timeseries` | Read P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||
| Write QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="write"}[$__rate_interval]))` | `timeseries` | Write QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-{{scheme}}` |
|
||||
| Write P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="write"}[$__rate_interval])))` | `timeseries` | Write P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| List QPS per Instance | `sum by(instance, pod, scheme) (rate(opendal_operation_duration_seconds_count{ operation="list"}[$__rate_interval]))` | `timeseries` | List QPS per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| List P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{ operation="list"}[$__rate_interval])))` | `timeseries` | List P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]` |
|
||||
| Other Requests per Instance | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{operation!~"read\|write\|list\|stat"}[$__rate_interval]))` | `timeseries` | Other Requests per Instance. | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `s` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `ops` | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
|
||||
# Metasrv
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `prometheus` | `none` | `from-datanode-{{datanode_id}}` |
|
||||
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `prometheus` | `none` | `__auto` |
|
||||
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `__auto` |
|
||||
| Region migration datanode | `greptime_meta_region_migration_stat{datanode_type="src"}`<br/>`greptime_meta_region_migration_stat{datanode_type="desc"}` | `state-timeline` | Counter of region migration by source and destination | `none` | `prometheus` | `from-datanode-{{datanode_id}}` |
|
||||
| Region migration error | `greptime_meta_region_migration_error` | `timeseries` | Counter of region migration error | `none` | `prometheus` | `__auto` |
|
||||
| Datanode load | `greptime_datanode_load` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `none` | `prometheus` | `__auto` |
|
||||
# Flownode
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | `prometheus` | -- | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
|
||||
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-p95` |
|
||||
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
|
||||
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]` |
|
||||
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{code}}]` |
|
||||
| Flow Ingest / Output Rate | `sum by(instance, pod, direction) (rate(greptime_flow_processed_rows[$__rate_interval]))` | `timeseries` | Flow Ingest / Output Rate. | -- | `prometheus` | `[{{pod}}]-[{{instance}}]-[{{direction}}]` |
|
||||
| Flow Ingest Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_insert_elapsed_bucket[$__rate_interval])) by (le, instance, pod))` | `timeseries` | Flow Ingest Latency. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-p95` |
|
||||
| Flow Operation Latency | `histogram_quantile(0.95, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))`<br/>`histogram_quantile(0.99, sum(rate(greptime_flow_processing_time_bucket[$__rate_interval])) by (le,instance,pod,type))` | `timeseries` | Flow Operation Latency. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{type}}]-p95` |
|
||||
| Flow Buffer Size per Instance | `greptime_flow_input_buf_size` | `timeseries` | Flow Buffer Size per Instance. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]` |
|
||||
| Flow Processing Error per Instance | `sum by(instance,pod,code) (rate(greptime_flow_errors[$__rate_interval]))` | `timeseries` | Flow Processing Error per Instance. | -- | `prometheus` | `[{{instance}}]-[{{pod}}]-[{{code}}]` |
|
||||
|
||||
@@ -426,6 +426,7 @@ groups:
|
||||
- title: Write Stall per Instance
|
||||
type: timeseries
|
||||
description: Write Stall per Instance.
|
||||
unit: decbytes
|
||||
queries:
|
||||
- expr: sum by(instance, pod) (greptime_mito_write_stall_total{})
|
||||
datasource:
|
||||
@@ -657,22 +658,13 @@ groups:
|
||||
- title: Opendal traffic
|
||||
type: timeseries
|
||||
description: Total traffic as in bytes by instance and operation
|
||||
unit: decbytes
|
||||
unit: ops
|
||||
queries:
|
||||
- expr: sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]'
|
||||
- title: OpenDAL errors per Instance
|
||||
type: timeseries
|
||||
description: OpenDAL error counts per Instance.
|
||||
queries:
|
||||
- expr: sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{ error!="NotFound"}[$__rate_interval]))
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
|
||||
- title: Metasrv
|
||||
panels:
|
||||
- title: Region migration datanode
|
||||
|
||||
@@ -2,7 +2,7 @@

CLUSTER_DASHBOARD_DIR=${1:-grafana/dashboards/cluster}
STANDALONE_DASHBOARD_DIR=${2:-grafana/dashboards/standalone}
DAC_IMAGE=ghcr.io/zyy17/dac:20250423-522bd35
DAC_IMAGE=ghcr.io/zyy17/dac:20250422-c9435ce

remove_instance_filters() {
# Remove the instance filters for the standalone dashboards.
@@ -10,15 +10,8 @@ remove_instance_filters() {
}

generate_intermediate_dashboards_and_docs() {
docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} \
-i /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.json \
-o /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.yaml \
-m /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.md

docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} \
-i /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.json \
-o /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.yaml \
-m /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.md
docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} -i /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.json -o /greptimedb/$CLUSTER_DASHBOARD_DIR/dashboard.yaml -m > $CLUSTER_DASHBOARD_DIR/dashboard.md
docker run -v ${PWD}:/greptimedb --rm ${DAC_IMAGE} -i /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.json -o /greptimedb/$STANDALONE_DASHBOARD_DIR/dashboard.yaml -m > $STANDALONE_DASHBOARD_DIR/dashboard.md
}

remove_instance_filters

@@ -132,7 +132,7 @@ impl SubCommand {
}

#[derive(Debug, Default, Parser)]
pub struct StartCommand {
struct StartCommand {
/// The address to bind the gRPC server.
#[clap(long, alias = "bind-addr")]
rpc_bind_addr: Option<String>,
@@ -172,7 +172,7 @@ pub struct StartCommand {
}

impl StartCommand {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
let mut opts = MetasrvOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
@@ -261,7 +261,7 @@ impl StartCommand {
Ok(())
}

pub async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
common_runtime::init_global_runtimes(&opts.runtime);

let guard = common_telemetry::init_global_logging(

@@ -56,8 +56,8 @@ use datanode::datanode::{Datanode, DatanodeBuilder};
|
||||
use datanode::region_server::RegionServer;
|
||||
use file_engine::config::EngineConfig as FileEngineConfig;
|
||||
use flow::{
|
||||
FlowConfig, FlownodeBuilder, FlownodeInstance, FlownodeOptions, FrontendClient,
|
||||
FrontendInvoker, GrpcQueryHandlerWithBoxedError, StreamingEngine,
|
||||
FlowConfig, FlowStreamingEngine, FlownodeBuilder, FlownodeInstance, FlownodeOptions,
|
||||
FrontendClient, FrontendInvoker, GrpcQueryHandlerWithBoxedError,
|
||||
};
|
||||
use frontend::frontend::{Frontend, FrontendOptions};
|
||||
use frontend::instance::builder::FrontendBuilder;
|
||||
@@ -544,9 +544,9 @@ impl StartCommand {
|
||||
|
||||
// set the ref to query for the local flow state
|
||||
{
|
||||
let flow_streaming_engine = flownode.flow_engine().streaming_engine();
|
||||
let flow_worker_manager = flownode.flow_engine().streaming_engine();
|
||||
information_extension
|
||||
.set_flow_streaming_engine(flow_streaming_engine)
|
||||
.set_flow_worker_manager(flow_worker_manager)
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -615,10 +615,10 @@ impl StartCommand {
|
||||
.replace(weak_grpc_handler);
|
||||
|
||||
// set the frontend invoker for flownode
|
||||
let flow_streaming_engine = flownode.flow_engine().streaming_engine();
|
||||
let flow_worker_manager = flownode.flow_engine().streaming_engine();
|
||||
// flow server need to be able to use frontend to write insert requests back
|
||||
let invoker = FrontendInvoker::build_from(
|
||||
flow_streaming_engine.clone(),
|
||||
flow_worker_manager.clone(),
|
||||
catalog_manager.clone(),
|
||||
kv_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
@@ -627,7 +627,7 @@ impl StartCommand {
|
||||
)
|
||||
.await
|
||||
.context(error::StartFlownodeSnafu)?;
|
||||
flow_streaming_engine.set_frontend_invoker(invoker).await;
|
||||
flow_worker_manager.set_frontend_invoker(invoker).await;
|
||||
|
||||
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
|
||||
.context(error::ServersSnafu)?;
|
||||
@@ -703,7 +703,7 @@ pub struct StandaloneInformationExtension {
|
||||
region_server: RegionServer,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
start_time_ms: u64,
|
||||
flow_streaming_engine: RwLock<Option<Arc<StreamingEngine>>>,
|
||||
flow_worker_manager: RwLock<Option<Arc<FlowStreamingEngine>>>,
|
||||
}
|
||||
|
||||
impl StandaloneInformationExtension {
|
||||
@@ -712,14 +712,14 @@ impl StandaloneInformationExtension {
|
||||
region_server,
|
||||
procedure_manager,
|
||||
start_time_ms: common_time::util::current_time_millis() as u64,
|
||||
flow_streaming_engine: RwLock::new(None),
|
||||
flow_worker_manager: RwLock::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the flow streaming engine for the standalone instance.
|
||||
pub async fn set_flow_streaming_engine(&self, flow_streaming_engine: Arc<StreamingEngine>) {
|
||||
let mut guard = self.flow_streaming_engine.write().await;
|
||||
*guard = Some(flow_streaming_engine);
|
||||
/// Set the flow worker manager for the standalone instance.
|
||||
pub async fn set_flow_worker_manager(&self, flow_worker_manager: Arc<FlowStreamingEngine>) {
|
||||
let mut guard = self.flow_worker_manager.write().await;
|
||||
*guard = Some(flow_worker_manager);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -798,7 +798,7 @@ impl InformationExtension for StandaloneInformationExtension {
|
||||
|
||||
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
|
||||
Ok(Some(
|
||||
self.flow_streaming_engine
|
||||
self.flow_worker_manager
|
||||
.read()
|
||||
.await
|
||||
.as_ref()
|
||||
|
||||
@@ -74,7 +74,6 @@ fn test_load_datanode_example_config() {
|
||||
RegionEngineConfig::File(FileEngineConfig {}),
|
||||
RegionEngineConfig::Metric(MetricEngineConfig {
|
||||
experimental_sparse_primary_key_encoding: false,
|
||||
flush_metadata_region_interval: Duration::from_secs(30),
|
||||
}),
|
||||
],
|
||||
logging: LoggingOptions {
|
||||
@@ -217,7 +216,6 @@ fn test_load_standalone_example_config() {
|
||||
RegionEngineConfig::File(FileEngineConfig {}),
|
||||
RegionEngineConfig::Metric(MetricEngineConfig {
|
||||
experimental_sparse_primary_key_encoding: false,
|
||||
flush_metadata_region_interval: Duration::from_secs(30),
|
||||
}),
|
||||
],
|
||||
storage: StorageConfig {
|
||||
|
||||
@@ -115,13 +115,6 @@ impl Function for UddSketchCalcFunction {
}
};

// Check if the sketch is empty, if so, return null
// This is important to avoid panics when calling estimate_quantile on an empty sketch
// In practice, this will happen if input is all null
if sketch.bucket_iter().count() == 0 {
builder.push_null();
continue;
}
// Compute the estimated quantile from the sketch
let result = sketch.estimate_quantile(perc);
builder.push(Some(result));
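The guard added above emits SQL NULL instead of calling `estimate_quantile` on a sketch with no buckets. A dependency-free sketch of the same guard, using a made-up `Sketch` type rather than the real UDDSketch implementation:

```rust
// Hypothetical simplified sketch; only the "no data" guard matters here.
struct Sketch {
    buckets: Vec<(i64, u64)>,
}

impl Sketch {
    fn estimate_quantile(&self, _q: f64) -> f64 {
        // A real implementation would misbehave on an empty sketch.
        self.buckets[0].0 as f64
    }
}

// All-null input produces an empty sketch; return None (SQL NULL) instead of panicking.
fn quantile_or_null(sketch: &Sketch, q: f64) -> Option<f64> {
    if sketch.buckets.is_empty() {
        return None;
    }
    Some(sketch.estimate_quantile(q))
}

fn main() {
    assert_eq!(quantile_or_null(&Sketch { buckets: vec![] }, 0.5), None);
    assert_eq!(quantile_or_null(&Sketch { buckets: vec![(7, 1)] }, 0.5), Some(7.0));
}
```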
|
||||
|
||||
@@ -187,7 +187,6 @@ mod tests {
|
||||
},
|
||||
flownode_ids: BTreeMap::from([(0, 1), (1, 2), (2, 3)]),
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
query_context: None,
|
||||
flow_name: "my_flow".to_string(),
|
||||
raw_sql: "sql".to_string(),
|
||||
expire_after: Some(300),
|
||||
|
||||
@@ -449,7 +449,6 @@ impl From<&CreateFlowData> for (FlowInfoValue, Vec<(FlowPartitionId, FlowRouteVa
|
||||
sink_table_name,
|
||||
flownode_ids,
|
||||
catalog_name,
|
||||
query_context: Some(value.query_context.clone()),
|
||||
flow_name,
|
||||
raw_sql: sql,
|
||||
expire_after,
|
||||
|
||||
@@ -790,14 +790,6 @@ pub enum Error {
|
||||
#[snafu(source)]
|
||||
source: common_procedure::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse timezone"))]
|
||||
InvalidTimeZone {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: common_time::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -869,8 +861,7 @@ impl ErrorExt for Error {
|
||||
| InvalidSetDatabaseOption { .. }
|
||||
| InvalidUnsetDatabaseOption { .. }
|
||||
| InvalidTopicNamePrefix { .. }
|
||||
| InvalidTimeZone { .. } => StatusCode::InvalidArguments,
|
||||
InvalidFlowRequestBody { .. } => StatusCode::InvalidArguments,
|
||||
| InvalidFlowRequestBody { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
FlowNotFound { .. } => StatusCode::FlowNotFound,
|
||||
FlowRouteNotFound { .. } => StatusCode::Unexpected,
|
||||
|
||||
@@ -452,7 +452,6 @@ mod tests {
|
||||
};
|
||||
FlowInfoValue {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
query_context: None,
|
||||
flow_name: flow_name.to_string(),
|
||||
source_table_ids,
|
||||
sink_table_name,
|
||||
@@ -626,7 +625,6 @@ mod tests {
|
||||
};
|
||||
let flow_value = FlowInfoValue {
|
||||
catalog_name: "greptime".to_string(),
|
||||
query_context: None,
|
||||
flow_name: "flow".to_string(),
|
||||
source_table_ids: vec![1024, 1025, 1026],
|
||||
sink_table_name: another_sink_table_name,
|
||||
@@ -866,7 +864,6 @@ mod tests {
|
||||
};
|
||||
let flow_value = FlowInfoValue {
|
||||
catalog_name: "greptime".to_string(),
|
||||
query_context: None,
|
||||
flow_name: "flow".to_string(),
|
||||
source_table_ids: vec![1024, 1025, 1026],
|
||||
sink_table_name: another_sink_table_name,
|
||||
|
||||
@@ -121,13 +121,6 @@ pub struct FlowInfoValue {
pub(crate) flownode_ids: BTreeMap<FlowPartitionId, FlownodeId>,
/// The catalog name.
pub(crate) catalog_name: String,
/// The query context used when create flow.
/// Although flow doesn't belong to any schema, this query_context is needed to remember
/// the query context when `create_flow` is executed
/// for recovering flow using the same sql&query_context after db restart.
/// if none, should use default query context
#[serde(default)]
pub(crate) query_context: Option<crate::rpc::ddl::QueryContext>,
/// The flow name.
pub(crate) flow_name: String,
/// The raw sql.
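`#[serde(default)]` on the new optional `query_context` field is what keeps flow metadata persisted by older versions deserializable. A small sketch of that behaviour, assuming `serde` (with the derive feature) and `serde_json` as dependencies and using a trimmed stand-in struct:

```rust
use serde::Deserialize;

// Trimmed stand-in for the flow info value; only the new optional field matters.
#[derive(Deserialize)]
struct FlowInfo {
    flow_name: String,
    #[serde(default)]
    query_context: Option<String>,
}

fn main() {
    // JSON written by an older version, without `query_context`: still parses, field is None.
    let old = r#"{"flow_name":"my_flow"}"#;
    let v: FlowInfo = serde_json::from_str(old).unwrap();
    assert_eq!(v.flow_name, "my_flow");
    assert!(v.query_context.is_none());

    // JSON written by the newer version carries the field.
    let new = r#"{"flow_name":"my_flow","query_context":"greptime"}"#;
    let v: FlowInfo = serde_json::from_str(new).unwrap();
    assert_eq!(v.query_context.as_deref(), Some("greptime"));
}
```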
|
||||
@@ -162,10 +155,6 @@ impl FlowInfoValue {
|
||||
&self.catalog_name
|
||||
}
|
||||
|
||||
pub fn query_context(&self) -> &Option<crate::rpc::ddl::QueryContext> {
|
||||
&self.query_context
|
||||
}
|
||||
|
||||
pub fn flow_name(&self) -> &String {
|
||||
&self.flow_name
|
||||
}
|
||||
|
||||
@@ -113,10 +113,8 @@ impl LeaderRegionManifestInfo {
pub fn prunable_entry_id(&self) -> u64 {
match self {
LeaderRegionManifestInfo::Mito {
flushed_entry_id,
topic_latest_entry_id,
..
} => (*flushed_entry_id).max(*topic_latest_entry_id),
flushed_entry_id, ..
} => *flushed_entry_id,
LeaderRegionManifestInfo::Metric {
data_flushed_entry_id,
data_topic_latest_entry_id,
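One side of this hunk computes the prunable entry id as the larger of the flushed entry id and the latest entry id seen on the WAL topic, per engine variant. A self-contained rendering of that computation with a simplified stand-in enum (field names follow the diff; the enum itself is not the real type):

```rust
enum ManifestInfo {
    Mito {
        flushed_entry_id: u64,
        topic_latest_entry_id: u64,
    },
    Metric {
        data_flushed_entry_id: u64,
        data_topic_latest_entry_id: u64,
    },
}

impl ManifestInfo {
    // Take the higher of the two watermarks for each variant.
    fn prunable_entry_id(&self) -> u64 {
        match self {
            ManifestInfo::Mito {
                flushed_entry_id,
                topic_latest_entry_id,
            } => (*flushed_entry_id).max(*topic_latest_entry_id),
            ManifestInfo::Metric {
                data_flushed_entry_id,
                data_topic_latest_entry_id,
            } => (*data_flushed_entry_id).max(*data_topic_latest_entry_id),
        }
    }
}

fn main() {
    let mito = ManifestInfo::Mito { flushed_entry_id: 10, topic_latest_entry_id: 42 };
    assert_eq!(mito.prunable_entry_id(), 42);
    let metric = ManifestInfo::Metric { data_flushed_entry_id: 7, data_topic_latest_entry_id: 3 };
    assert_eq!(metric.prunable_entry_id(), 7);
}
```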
|
||||
|
||||
@@ -35,20 +35,17 @@ use api::v1::{
|
||||
};
|
||||
use base64::engine::general_purpose;
|
||||
use base64::Engine as _;
|
||||
use common_time::{DatabaseTimeToLive, Timezone};
|
||||
use common_time::DatabaseTimeToLive;
|
||||
use prost::Message;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::{serde_as, DefaultOnNull};
|
||||
use session::context::{QueryContextBuilder, QueryContextRef};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
use table::table_name::TableName;
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::error::{
|
||||
self, InvalidSetDatabaseOptionSnafu, InvalidTimeZoneSnafu, InvalidUnsetDatabaseOptionSnafu,
|
||||
Result,
|
||||
};
|
||||
use crate::error::{self, InvalidSetDatabaseOptionSnafu, InvalidUnsetDatabaseOptionSnafu, Result};
|
||||
use crate::key::FlowId;
|
||||
|
||||
/// DDL tasks
|
||||
@@ -1205,7 +1202,7 @@ impl From<DropFlowTask> for PbDropFlowTask {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct QueryContext {
|
||||
current_catalog: String,
|
||||
current_schema: String,
|
||||
@@ -1226,19 +1223,6 @@ impl From<QueryContextRef> for QueryContext {
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<QueryContext> for session::context::QueryContext {
|
||||
type Error = error::Error;
|
||||
fn try_from(value: QueryContext) -> std::result::Result<Self, Self::Error> {
|
||||
Ok(QueryContextBuilder::default()
|
||||
.current_catalog(value.current_catalog)
|
||||
.current_schema(value.current_schema)
|
||||
.timezone(Timezone::from_tz_string(&value.timezone).context(InvalidTimeZoneSnafu)?)
|
||||
.extensions(value.extensions)
|
||||
.channel((value.channel as u32).into())
|
||||
.build())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<QueryContext> for PbQueryContext {
|
||||
fn from(
|
||||
QueryContext {
|
||||
|
||||
@@ -57,9 +57,9 @@ use tokio::sync::Notify;
|
||||
|
||||
use crate::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
|
||||
use crate::error::{
|
||||
self, BuildMetricEngineSnafu, BuildMitoEngineSnafu, CreateDirSnafu, GetMetadataSnafu,
|
||||
MissingCacheSnafu, MissingKvBackendSnafu, MissingNodeIdSnafu, OpenLogStoreSnafu, Result,
|
||||
ShutdownInstanceSnafu, ShutdownServerSnafu, StartServerSnafu,
|
||||
self, BuildMitoEngineSnafu, CreateDirSnafu, GetMetadataSnafu, MissingCacheSnafu,
|
||||
MissingKvBackendSnafu, MissingNodeIdSnafu, OpenLogStoreSnafu, Result, ShutdownInstanceSnafu,
|
||||
ShutdownServerSnafu, StartServerSnafu,
|
||||
};
|
||||
use crate::event_listener::{
|
||||
new_region_server_event_channel, NoopRegionServerEventListener, RegionServerEventListenerRef,
|
||||
@@ -416,11 +416,10 @@ impl DatanodeBuilder {
|
||||
)
|
||||
.await?;
|
||||
|
||||
let metric_engine = MetricEngine::try_new(
|
||||
let metric_engine = MetricEngine::new(
|
||||
mito_engine.clone(),
|
||||
metric_engine_config.take().unwrap_or_default(),
|
||||
)
|
||||
.context(BuildMetricEngineSnafu)?;
|
||||
);
|
||||
engines.push(Arc::new(mito_engine) as _);
|
||||
engines.push(Arc::new(metric_engine) as _);
|
||||
}
|
||||
|
||||
@@ -336,13 +336,6 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build metric engine"))]
|
||||
BuildMetricEngine {
|
||||
source: metric_engine::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to serialize options to TOML"))]
|
||||
TomlFormat {
|
||||
#[snafu(implicit)]
|
||||
@@ -459,7 +452,6 @@ impl ErrorExt for Error {
|
||||
|
||||
FindLogicalRegions { source, .. } => source.status_code(),
|
||||
BuildMitoEngine { source, .. } => source.status_code(),
|
||||
BuildMetricEngine { source, .. } => source.status_code(),
|
||||
ConcurrentQueryLimiterClosed { .. } | ConcurrentQueryLimiterTimeout { .. } => {
|
||||
StatusCode::RegionBusy
|
||||
}
|
||||
|
||||
@@ -135,13 +135,14 @@ impl Configurable for FlownodeOptions {
|
||||
}
|
||||
|
||||
/// Arc-ed FlowNodeManager, cheaper to clone
|
||||
pub type FlowStreamingEngineRef = Arc<StreamingEngine>;
|
||||
pub type FlowWorkerManagerRef = Arc<FlowStreamingEngine>;
|
||||
|
||||
/// FlowNodeManager manages the state of all tasks in the flow node, which should be run on the same thread
|
||||
///
|
||||
/// The choice of timestamp is just using current system timestamp for now
|
||||
///
|
||||
pub struct StreamingEngine {
|
||||
/// TODO(discord9): rename to FlowStreamingEngine
|
||||
pub struct FlowStreamingEngine {
|
||||
/// The handler to the worker that will run the dataflow
|
||||
/// which is `!Send` so a handle is used
|
||||
pub worker_handles: Vec<WorkerHandle>,
|
||||
@@ -170,7 +171,7 @@ pub struct StreamingEngine {
|
||||
}
|
||||
|
||||
/// Building FlownodeManager
|
||||
impl StreamingEngine {
|
||||
impl FlowStreamingEngine {
|
||||
/// set frontend invoker
|
||||
pub async fn set_frontend_invoker(&self, frontend: FrontendInvoker) {
|
||||
*self.frontend_invoker.write().await = Some(frontend);
|
||||
@@ -189,7 +190,7 @@ impl StreamingEngine {
|
||||
let node_context = FlownodeContext::new(Box::new(srv_map.clone()) as _);
|
||||
let tick_manager = FlowTickManager::new();
|
||||
let worker_handles = Vec::new();
|
||||
StreamingEngine {
|
||||
FlowStreamingEngine {
|
||||
worker_handles,
|
||||
worker_selector: Mutex::new(0),
|
||||
query_engine,
|
||||
@@ -265,7 +266,7 @@ pub fn batches_to_rows_req(batches: Vec<Batch>) -> Result<Vec<DiffRequest>, Erro
|
||||
}
|
||||
|
||||
/// This impl block contains methods to send writeback requests to frontend
|
||||
impl StreamingEngine {
|
||||
impl FlowStreamingEngine {
|
||||
/// Return the number of requests it made
|
||||
pub async fn send_writeback_requests(&self) -> Result<usize, Error> {
|
||||
let all_reqs = self.generate_writeback_request().await?;
|
||||
@@ -536,7 +537,7 @@ impl StreamingEngine {
|
||||
}
|
||||
|
||||
/// Flow Runtime related methods
|
||||
impl StreamingEngine {
|
||||
impl FlowStreamingEngine {
|
||||
/// Start state report handler, which will receive a sender from HeartbeatTask to send state size report back
|
||||
///
|
||||
/// if heartbeat task is shutdown, this future will exit too
|
||||
@@ -661,7 +662,7 @@ impl StreamingEngine {
}
// flow is now shutdown, drop frontend_invoker early so a ref cycle(in standalone mode) can be prevent:
// FlowWorkerManager.frontend_invoker -> FrontendInvoker.inserter
// -> Inserter.node_manager -> NodeManager.flownode -> Flownode.flow_streaming_engine.frontend_invoker
// -> Inserter.node_manager -> NodeManager.flownode -> Flownode.flow_worker_manager.frontend_invoker
self.frontend_invoker.write().await.take();
}
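The comment above describes an `Arc` reference cycle that only exists in standalone mode; taking the invoker out of its slot on shutdown is what breaks it. A minimal, runnable illustration of that cycle-breaking step, with placeholder types standing in for the engine and the frontend invoker:

```rust
use std::sync::Arc;
use tokio::sync::RwLock;

#[derive(Default)]
struct Engine {
    invoker: RwLock<Option<Arc<Invoker>>>,
}

struct Invoker {
    // In the real system this path leads back to the engine, closing the cycle.
    _back_ref: Arc<Engine>,
}

#[tokio::main]
async fn main() {
    let engine = Arc::new(Engine::default());
    let invoker = Arc::new(Invoker { _back_ref: engine.clone() });
    *engine.invoker.write().await = Some(invoker);

    // Without this take(), `engine` and the invoker keep each other alive forever.
    engine.invoker.write().await.take();
    assert_eq!(Arc::strong_count(&engine), 1);
}
```

Dropping the stored invoker releases its back reference, so both halves of the cycle can be freed once the engine itself goes out of scope.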
|
||||
|
||||
@@ -730,7 +731,7 @@ impl StreamingEngine {
|
||||
}
|
||||
|
||||
/// Create&Remove flow
|
||||
impl StreamingEngine {
|
||||
impl FlowStreamingEngine {
|
||||
/// remove a flow by it's id
|
||||
pub async fn remove_flow_inner(&self, flow_id: FlowId) -> Result<(), Error> {
|
||||
for handle in self.worker_handles.iter() {
|
||||
@@ -748,6 +749,7 @@ impl StreamingEngine {
|
||||
/// steps to create task:
|
||||
/// 1. parse query into typed plan(and optional parse expire_after expr)
|
||||
/// 2. render source/sink with output table id and used input table id
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn create_flow_inner(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
|
||||
let CreateFlowArgs {
|
||||
flow_id,
|
||||
|
||||
@@ -35,7 +35,7 @@ use snafu::{ensure, IntoError, OptionExt, ResultExt};
|
||||
use store_api::storage::{RegionId, TableId};
|
||||
use tokio::sync::{Mutex, RwLock};
|
||||
|
||||
use crate::adapter::{CreateFlowArgs, StreamingEngine};
|
||||
use crate::adapter::{CreateFlowArgs, FlowStreamingEngine};
|
||||
use crate::batching_mode::engine::BatchingEngine;
|
||||
use crate::engine::FlowEngine;
|
||||
use crate::error::{
|
||||
@@ -55,7 +55,7 @@ pub type FlowDualEngineRef = Arc<FlowDualEngine>;
|
||||
/// including create/drop/flush flow
|
||||
/// and redirect insert requests to the appropriate engine
|
||||
pub struct FlowDualEngine {
|
||||
streaming_engine: Arc<StreamingEngine>,
|
||||
streaming_engine: Arc<FlowStreamingEngine>,
|
||||
batching_engine: Arc<BatchingEngine>,
|
||||
/// helper struct for faster query flow by table id or vice versa
|
||||
src_table2flow: RwLock<SrcTableToFlow>,
|
||||
@@ -66,7 +66,7 @@ pub struct FlowDualEngine {
|
||||
|
||||
impl FlowDualEngine {
|
||||
pub fn new(
|
||||
streaming_engine: Arc<StreamingEngine>,
|
||||
streaming_engine: Arc<FlowStreamingEngine>,
|
||||
batching_engine: Arc<BatchingEngine>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
catalog_manager: Arc<dyn CatalogManager>,
|
||||
@@ -81,7 +81,7 @@ impl FlowDualEngine {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn streaming_engine(&self) -> Arc<StreamingEngine> {
|
||||
pub fn streaming_engine(&self) -> Arc<FlowStreamingEngine> {
|
||||
self.streaming_engine.clone()
|
||||
}
|
||||
|
||||
@@ -225,24 +225,11 @@ impl FlowDualEngine {
|
||||
comment: Some(info.comment().clone()),
|
||||
sql: info.raw_sql().clone(),
|
||||
flow_options: info.options().clone(),
|
||||
query_ctx: info
|
||||
.query_context()
|
||||
.clone()
|
||||
.map(|ctx| {
|
||||
ctx.try_into()
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)
|
||||
})
|
||||
.transpose()?
|
||||
// or use default QueryContext with catalog_name from info
|
||||
// to keep compatibility with old version
|
||||
.or_else(|| {
|
||||
Some(
|
||||
QueryContextBuilder::default()
|
||||
.current_catalog(info.catalog_name().to_string())
|
||||
.build(),
|
||||
)
|
||||
}),
|
||||
query_ctx: Some(
|
||||
QueryContextBuilder::default()
|
||||
.current_catalog(info.catalog_name().clone())
|
||||
.build(),
|
||||
),
|
||||
};
|
||||
if let Err(err) = self
|
||||
.create_flow(args)
|
||||
@@ -313,12 +300,11 @@ impl FlowDualEngine {
|
||||
}
|
||||
);
|
||||
|
||||
check_task.take().unwrap().stop().await?;
|
||||
check_task.take().expect("Already checked").stop().await?;
|
||||
info!("Stopped flow consistent check task");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// TODO(discord9): also add a `exists` api using flow metadata manager's `exists` method
|
||||
async fn flow_exist_in_metadata(&self, flow_id: FlowId) -> Result<bool, Error> {
|
||||
self.flow_metadata_manager
|
||||
.flow_info_manager()
|
||||
@@ -346,24 +332,23 @@ impl ConsistentCheckTask {
let (trigger_tx, mut trigger_rx) =
tokio::sync::mpsc::channel::<(bool, bool, tokio::sync::oneshot::Sender<()>)>(10);
let handle = common_runtime::spawn_global(async move {
let (mut allow_create, mut allow_drop) = (false, false);
let mut args = (false, false);
let mut ret_signal: Option<tokio::sync::oneshot::Sender<()>> = None;
loop {
if let Err(err) = inner.check_flow_consistent(allow_create, allow_drop).await {
if let Err(err) = inner.check_flow_consistent(args.0, args.1).await {
error!(err; "Failed to check flow consistent");
}
if let Some(done) = ret_signal.take() {
let _ = done.send(());
}

tokio::select! {
_ = rx.recv() => break,
incoming = trigger_rx.recv() => if let Some(incoming) = incoming {
(allow_create, allow_drop) = (incoming.0, incoming.1);
args = (incoming.0, incoming.1);
ret_signal = Some(incoming.2);
},
_ = tokio::time::sleep(std::time::Duration::from_secs(10)) => {
(allow_create, allow_drop) = (false, false);
},
_ = tokio::time::sleep(std::time::Duration::from_secs(10)) => args=(false,false),
}
}
});
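The check task above is a long-lived loop driven by `tokio::select!` over a shutdown channel, a trigger channel carrying the `(allow_create, allow_drop)` pair plus a completion signal, and a periodic 10-second fallback. A trimmed, self-contained sketch of the same loop shape; all names below are placeholders and `check` stands in for the real consistency check:

```rust
use std::time::Duration;
use tokio::sync::{mpsc, oneshot};

async fn check(allow_create: bool, allow_drop: bool) {
    println!("check: create={allow_create}, drop={allow_drop}");
}

#[tokio::main]
async fn main() {
    let (shutdown_tx, mut shutdown_rx) = mpsc::channel::<()>(1);
    let (trigger_tx, mut trigger_rx) = mpsc::channel::<(bool, bool, oneshot::Sender<()>)>(10);

    let worker = tokio::spawn(async move {
        let (mut allow_create, mut allow_drop) = (false, false);
        let mut ack: Option<oneshot::Sender<()>> = None;
        loop {
            check(allow_create, allow_drop).await;
            if let Some(done) = ack.take() {
                let _ = done.send(()); // acknowledge the triggering caller
            }
            tokio::select! {
                _ = shutdown_rx.recv() => break,
                incoming = trigger_rx.recv() => if let Some((c, d, done)) = incoming {
                    (allow_create, allow_drop) = (c, d);
                    ack = Some(done);
                },
                // Periodic pass resets the permissions to the conservative default.
                _ = tokio::time::sleep(Duration::from_secs(10)) => {
                    (allow_create, allow_drop) = (false, false);
                }
            }
        }
    });

    let (done_tx, done_rx) = oneshot::channel();
    trigger_tx.send((true, false, done_tx)).await.unwrap();
    done_rx.await.unwrap();
    shutdown_tx.send(()).await.unwrap();
    worker.await.unwrap();
}
```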
|
||||
@@ -707,7 +692,7 @@ fn to_meta_err(
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl common_meta::node_manager::Flownode for StreamingEngine {
|
||||
impl common_meta::node_manager::Flownode for FlowStreamingEngine {
|
||||
async fn handle(&self, request: FlowRequest) -> MetaResult<FlowResponse> {
|
||||
let query_ctx = request
|
||||
.header
|
||||
@@ -793,7 +778,7 @@ impl common_meta::node_manager::Flownode for StreamingEngine {
|
||||
}
|
||||
}
|
||||
|
||||
impl FlowEngine for StreamingEngine {
|
||||
impl FlowEngine for FlowStreamingEngine {
|
||||
async fn create_flow(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
|
||||
self.create_flow_inner(args).await
|
||||
}
|
||||
@@ -845,7 +830,7 @@ impl FetchFromRow {
|
||||
}
|
||||
}
|
||||
|
||||
impl StreamingEngine {
|
||||
impl FlowStreamingEngine {
|
||||
async fn handle_inserts_inner(
|
||||
&self,
|
||||
request: InsertRequests,
|
||||
|
||||
@@ -31,7 +31,7 @@ use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::adapter::table_source::ManagedTableSource;
|
||||
use crate::adapter::{FlowId, FlowStreamingEngineRef, StreamingEngine};
|
||||
use crate::adapter::{FlowId, FlowStreamingEngine, FlowWorkerManagerRef};
|
||||
use crate::error::{FlowNotFoundSnafu, JoinTaskSnafu, UnexpectedSnafu};
|
||||
use crate::expr::error::ExternalSnafu;
|
||||
use crate::expr::utils::find_plan_time_window_expr_lower_bound;
|
||||
@@ -39,10 +39,10 @@ use crate::repr::RelationDesc;
|
||||
use crate::server::get_all_flow_ids;
|
||||
use crate::{Error, FrontendInvoker};
|
||||
|
||||
impl StreamingEngine {
|
||||
impl FlowStreamingEngine {
|
||||
/// Create and start refill flow tasks in background
|
||||
pub async fn create_and_start_refill_flow_tasks(
|
||||
self: &FlowStreamingEngineRef,
|
||||
self: &FlowWorkerManagerRef,
|
||||
flow_metadata_manager: &FlowMetadataManagerRef,
|
||||
catalog_manager: &CatalogManagerRef,
|
||||
) -> Result<(), Error> {
|
||||
@@ -130,7 +130,7 @@ impl StreamingEngine {
|
||||
|
||||
/// Starting to refill flows, if any error occurs, will rebuild the flow and retry
|
||||
pub(crate) async fn starting_refill_flows(
|
||||
self: &FlowStreamingEngineRef,
|
||||
self: &FlowWorkerManagerRef,
|
||||
tasks: Vec<RefillTask>,
|
||||
) -> Result<(), Error> {
|
||||
// TODO(discord9): add a back pressure mechanism
|
||||
@@ -266,7 +266,7 @@ impl TaskState<()> {
|
||||
fn start_running(
|
||||
&mut self,
|
||||
task_data: &TaskData,
|
||||
manager: FlowStreamingEngineRef,
|
||||
manager: FlowWorkerManagerRef,
|
||||
mut output_stream: SendableRecordBatchStream,
|
||||
) -> Result<(), Error> {
|
||||
let data = (*task_data).clone();
|
||||
@@ -383,7 +383,7 @@ impl RefillTask {
|
||||
/// Start running the task in background, non-blocking
|
||||
pub async fn start_running(
|
||||
&mut self,
|
||||
manager: FlowStreamingEngineRef,
|
||||
manager: FlowWorkerManagerRef,
|
||||
invoker: &FrontendInvoker,
|
||||
) -> Result<(), Error> {
|
||||
let TaskState::Prepared { sql } = &mut self.state else {
|
||||
|
||||
@@ -16,9 +16,9 @@ use std::collections::BTreeMap;
|
||||
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
|
||||
use crate::StreamingEngine;
|
||||
use crate::FlowStreamingEngine;
|
||||
|
||||
impl StreamingEngine {
|
||||
impl FlowStreamingEngine {
|
||||
pub async fn gen_state_report(&self) -> FlowStat {
|
||||
let mut full_report = BTreeMap::new();
|
||||
let mut last_exec_time_map = BTreeMap::new();
|
||||
|
||||
@@ -33,8 +33,8 @@ use crate::adapter::table_source::TableDesc;
|
||||
use crate::adapter::{TableName, WorkerHandle, AUTO_CREATED_PLACEHOLDER_TS_COL};
|
||||
use crate::error::{Error, ExternalSnafu, UnexpectedSnafu};
|
||||
use crate::repr::{ColumnType, RelationDesc, RelationType};
|
||||
use crate::StreamingEngine;
|
||||
impl StreamingEngine {
|
||||
use crate::FlowStreamingEngine;
|
||||
impl FlowStreamingEngine {
|
||||
/// Get a worker handle for creating flow, using round robin to select a worker
|
||||
pub(crate) async fn get_worker_handle_for_create_flow(&self) -> &WorkerHandle {
|
||||
let use_idx = {
|
||||
|
||||
@@ -32,9 +32,3 @@ pub const SLOW_QUERY_THRESHOLD: Duration = Duration::from_secs(60);
|
||||
|
||||
/// The minimum duration between two queries execution by batching mode task
|
||||
const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);
|
||||
|
||||
/// Grpc connection timeout
|
||||
const GRPC_CONN_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
|
||||
/// Grpc max retry number
|
||||
const GRPC_MAX_RETRIES: u32 = 3;
|
||||
|
||||
@@ -267,8 +267,7 @@ impl BatchingEngine {
|
||||
// also check table option to see if ttl!=instant
|
||||
let table_name = get_table_name(self.table_meta.table_info_manager(), &src_id).await?;
|
||||
let table_info = get_table_info(self.table_meta.table_info_manager(), &src_id).await?;
|
||||
ensure!(
|
||||
table_info.table_info.meta.options.ttl != Some(TimeToLive::Instant),
|
||||
if table_info.table_info.meta.options.ttl == Some(TimeToLive::Instant) {
|
||||
UnsupportedSnafu {
|
||||
reason: format!(
|
||||
"Source table `{}`(id={}) has instant TTL, Instant TTL is not supported under batching mode. Consider using a TTL longer than flush interval",
|
||||
@@ -276,8 +275,8 @@ impl BatchingEngine {
|
||||
src_id
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
.fail()?;
|
||||
}
|
||||
source_table_names.push(table_name);
|
||||
}
|
||||
|
||||
|
||||
@@ -25,15 +25,12 @@ use common_meta::cluster::{NodeInfo, NodeInfoKey, Role};
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::rpc::store::RangeRequest;
|
||||
use common_query::Output;
|
||||
use common_telemetry::warn;
|
||||
use meta_client::client::MetaClient;
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use session::context::{QueryContextBuilder, QueryContextRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::batching_mode::{
|
||||
DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, GRPC_CONN_TIMEOUT, GRPC_MAX_RETRIES,
|
||||
};
|
||||
use crate::batching_mode::DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT;
|
||||
use crate::error::{ExternalSnafu, InvalidRequestSnafu, UnexpectedSnafu};
|
||||
use crate::Error;
|
||||
|
||||
@@ -82,6 +79,7 @@ pub enum FrontendClient {
|
||||
Standalone {
|
||||
/// for the sake of simplicity still use grpc even in standalone mode
|
||||
/// notice the client here should all be lazy, so that can wait after frontend is booted then make conn
|
||||
/// TODO(discord9): not use grpc under standalone mode
|
||||
database_client: HandlerMutable,
|
||||
},
|
||||
}
|
||||
@@ -102,9 +100,7 @@ impl FrontendClient {
|
||||
Self::Distributed {
|
||||
meta_client,
|
||||
chnl_mgr: {
|
||||
let cfg = ChannelConfig::new()
|
||||
.connect_timeout(GRPC_CONN_TIMEOUT)
|
||||
.timeout(DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT);
|
||||
let cfg = ChannelConfig::new().timeout(DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT);
|
||||
ChannelManager::with_config(cfg)
|
||||
},
|
||||
}
|
||||
@@ -228,32 +224,12 @@ impl FrontendClient {
|
||||
peer: db.peer.clone(),
|
||||
});
|
||||
|
||||
let mut retry = 0;

loop {
let ret = db.database.handle(req.clone()).await.with_context(|_| {
InvalidRequestSnafu {
context: format!("Failed to handle request: {:?}", req),
}
});
if let Err(err) = ret {
if retry < GRPC_MAX_RETRIES {
retry += 1;
warn!(
"Failed to send request to grpc handle at Peer={:?}, retry = {}, error = {:?}",
db.peer, retry, err
);
continue;
} else {
common_telemetry::error!(
"Failed to send request to grpc handle at Peer={:?} after {} retries, error = {:?}",
db.peer, retry, err
);
return Err(err);
}
}
return ret;
}
db.database
.handle(req.clone())
.await
.with_context(|_| InvalidRequestSnafu {
context: format!("Failed to handle request: {:?}", req),
})
}
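The longer side of this hunk retries the gRPC `handle` call up to `GRPC_MAX_RETRIES` times, logging a warning per attempt and an error before giving up. A dependency-free sketch of that bounded retry loop; the failing-then-succeeding `send` below is simulated, not a real client call:

```rust
const MAX_RETRIES: u32 = 3;

// Simulated request: the first two attempts fail, the third succeeds.
fn send(attempt: u32) -> Result<&'static str, String> {
    if attempt < 2 {
        Err(format!("transient error on attempt {attempt}"))
    } else {
        Ok("response")
    }
}

fn send_with_retry() -> Result<&'static str, String> {
    let mut retry = 0;
    loop {
        match send(retry) {
            Ok(resp) => return Ok(resp),
            Err(err) if retry < MAX_RETRIES => {
                retry += 1;
                eprintln!("request failed, retry = {retry}, error = {err}");
            }
            Err(err) => {
                eprintln!("request failed after {retry} retries, error = {err}");
                return Err(err);
            }
        }
    }
}

fn main() {
    assert_eq!(send_with_retry(), Ok("response"));
}
```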
|
||||
FrontendClient::Standalone { database_client } => {
|
||||
let ctx = QueryContextBuilder::default()
|
||||
|
||||
@@ -36,7 +36,7 @@ use operator::expr_helper::column_schemas_to_defs;
|
||||
use query::query_engine::DefaultSerializer;
|
||||
use query::QueryEngineRef;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
use tokio::sync::oneshot;
|
||||
use tokio::sync::oneshot::error::TryRecvError;
|
||||
@@ -53,7 +53,6 @@ use crate::batching_mode::utils::{
|
||||
use crate::batching_mode::{
|
||||
DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, MIN_REFRESH_DURATION, SLOW_QUERY_THRESHOLD,
|
||||
};
|
||||
use crate::df_optimizer::apply_df_optimizer;
|
||||
use crate::error::{
|
||||
ConvertColumnSchemaSnafu, DatafusionSnafu, ExternalSnafu, InvalidQuerySnafu,
|
||||
SubstraitEncodeLogicalPlanSnafu, UnexpectedSnafu,
|
||||
@@ -223,15 +222,15 @@ impl BatchingTask {
|
||||
.map(|c| c.name)
|
||||
.collect::<BTreeSet<_>>();
|
||||
for column in new_query.schema().columns() {
|
||||
ensure!(
|
||||
table_columns.contains(column.name()),
|
||||
InvalidQuerySnafu {
|
||||
if !table_columns.contains(column.name()) {
|
||||
return InvalidQuerySnafu {
|
||||
reason: format!(
|
||||
"Column {} not found in sink table with columns {:?}",
|
||||
column, table_columns
|
||||
),
|
||||
}
|
||||
);
|
||||
.fail();
|
||||
}
|
||||
}
|
||||
// update_at& time index placeholder (if exists) should have default value
|
||||
LogicalPlan::Dml(DmlStatement::new(
|
||||
@@ -542,10 +541,7 @@ impl BatchingTask {
|
||||
.clone()
|
||||
.rewrite(&mut add_auto_column)
|
||||
.with_context(|_| DatafusionSnafu {
|
||||
context: format!(
|
||||
"Failed to rewrite plan:\n {}\n",
|
||||
self.config.plan
|
||||
),
|
||||
context: format!("Failed to rewrite plan {:?}", self.config.plan),
|
||||
})?
|
||||
.data;
|
||||
let schema_len = plan.schema().fields().len();
|
||||
@@ -577,19 +573,16 @@ impl BatchingTask {
|
||||
|
||||
let mut add_filter = AddFilterRewriter::new(expr);
|
||||
let mut add_auto_column = AddAutoColumnRewriter::new(sink_table_schema.clone());
|
||||
|
||||
// make a not optimized plan for clearer unparse
|
||||
let plan = sql_to_df_plan(query_ctx.clone(), engine.clone(), &self.config.query, false)
|
||||
.await?;
|
||||
let rewrite = plan
|
||||
.clone()
|
||||
plan.clone()
|
||||
.rewrite(&mut add_filter)
|
||||
.and_then(|p| p.data.rewrite(&mut add_auto_column))
|
||||
.with_context(|_| DatafusionSnafu {
|
||||
context: format!("Failed to rewrite plan:\n {}\n", plan),
|
||||
context: format!("Failed to rewrite plan {plan:?}"),
|
||||
})?
|
||||
.data;
|
||||
// only apply optimize after complex rewrite is done
|
||||
apply_df_optimizer(rewrite).await?
|
||||
.data
|
||||
};
|
||||
|
||||
Ok(Some((new_plan, schema_len)))
|
||||
|
||||
@@ -704,28 +704,6 @@ mod test {
|
||||
),
|
||||
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
|
||||
),
|
||||
// complex time window index with where
|
||||
(
|
||||
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE number in (2, 3, 4) GROUP BY time_window;",
|
||||
Timestamp::new(1740394109, TimeUnit::Second),
|
||||
(
|
||||
"ts".to_string(),
|
||||
Some(Timestamp::new(1740394080, TimeUnit::Second)),
|
||||
Some(Timestamp::new(1740394140, TimeUnit::Second)),
|
||||
),
|
||||
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE numbers_with_ts.number IN (2, 3, 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
|
||||
),
|
||||
// complex time window index with between and
|
||||
(
|
||||
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE number BETWEEN 2 AND 4 GROUP BY time_window;",
|
||||
Timestamp::new(1740394109, TimeUnit::Second),
|
||||
(
|
||||
"ts".to_string(),
|
||||
Some(Timestamp::new(1740394080, TimeUnit::Second)),
|
||||
Some(Timestamp::new(1740394140, TimeUnit::Second)),
|
||||
),
|
||||
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE (numbers_with_ts.number BETWEEN 2 AND 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
|
||||
),
|
||||
// no time index
|
||||
(
|
||||
"SELECT date_bin('5 minutes', ts) FROM numbers_with_ts;",
|
||||
|
||||
@@ -50,8 +50,8 @@ pub async fn get_table_info_df_schema(
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?
|
||||
.context(TableNotFoundSnafu {
|
||||
name: &full_table_name,
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
name: full_table_name.clone(),
|
||||
})?;
|
||||
let table_info = table.table_info().clone();
|
||||
|
||||
@@ -342,8 +342,8 @@ impl TreeNodeRewriter for AddAutoColumnRewriter {
|
||||
}
|
||||
} else {
|
||||
return Err(DataFusionError::Plan(format!(
|
||||
"Expect table have 0,1 or 2 columns more than query columns, found {} query columns {:?}, {} table columns {:?}",
|
||||
query_col_cnt, exprs, table_col_cnt, self.schema.column_schemas()
|
||||
"Expect table have 0,1 or 2 columns more than query columns, found {} query columns {:?}, {} table columns {:?} at node {:?}",
|
||||
query_col_cnt, exprs, table_col_cnt, self.schema.column_schemas(), node
|
||||
)));
|
||||
}
|
||||
|
||||
@@ -358,6 +358,8 @@ impl TreeNodeRewriter for AddAutoColumnRewriter {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(discord9): a method to found out the precise time window
|
||||
|
||||
/// Find out the `Filter` Node corresponding to innermost(deepest) `WHERE` and add a new filter expr to it
|
||||
#[derive(Debug)]
|
||||
pub struct AddFilterRewriter {
|
||||
@@ -406,9 +408,7 @@ mod test {
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, Schema};
|
||||
use pretty_assertions::assert_eq;
|
||||
use query::query_engine::DefaultSerializer;
|
||||
use session::context::QueryContext;
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
|
||||
use super::*;
|
||||
use crate::test_utils::create_test_query_engine;
|
||||
@@ -703,18 +703,4 @@ mod test {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_null_cast() {
|
||||
let query_engine = create_test_query_engine();
|
||||
let ctx = QueryContext::arc();
|
||||
let sql = "SELECT NULL::DOUBLE FROM numbers_with_ts";
|
||||
let plan = sql_to_df_plan(ctx, query_engine.clone(), sql, false)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let _sub_plan = DFLogicalSubstraitConvertor {}
|
||||
.encode(&plan, DefaultSerializer)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ use datafusion::config::ConfigOptions;
|
||||
use datafusion::error::DataFusionError;
|
||||
use datafusion::functions_aggregate::count::count_udaf;
|
||||
use datafusion::functions_aggregate::sum::sum_udaf;
|
||||
use datafusion::optimizer::analyzer::count_wildcard_rule::CountWildcardRule;
|
||||
use datafusion::optimizer::analyzer::type_coercion::TypeCoercion;
|
||||
use datafusion::optimizer::common_subexpr_eliminate::CommonSubexprEliminate;
|
||||
use datafusion::optimizer::optimize_projections::OptimizeProjections;
|
||||
@@ -41,7 +42,6 @@ use datafusion_expr::{
|
||||
BinaryExpr, ColumnarValue, Expr, Operator, Projection, ScalarFunctionArgs, ScalarUDFImpl,
|
||||
Signature, TypeSignature, Volatility,
|
||||
};
|
||||
use query::optimizer::count_wildcard::CountWildcardToTimeIndexRule;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use query::query_engine::DefaultSerializer;
|
||||
use query::QueryEngine;
|
||||
@@ -61,9 +61,9 @@ pub async fn apply_df_optimizer(
|
||||
) -> Result<datafusion_expr::LogicalPlan, Error> {
|
||||
let cfg = ConfigOptions::new();
|
||||
let analyzer = Analyzer::with_rules(vec![
|
||||
Arc::new(CountWildcardToTimeIndexRule),
|
||||
Arc::new(AvgExpandRule),
|
||||
Arc::new(TumbleExpandRule),
|
||||
Arc::new(CountWildcardRule::new()),
|
||||
Arc::new(AvgExpandRule::new()),
|
||||
Arc::new(TumbleExpandRule::new()),
|
||||
Arc::new(CheckGroupByRule::new()),
|
||||
Arc::new(TypeCoercion::new()),
|
||||
]);
|
||||
@@ -128,7 +128,13 @@ pub async fn sql_to_flow_plan(
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct AvgExpandRule;
|
||||
struct AvgExpandRule {}
|
||||
|
||||
impl AvgExpandRule {
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
}
|
||||
|
||||
impl AnalyzerRule for AvgExpandRule {
|
||||
fn analyze(
|
||||
@@ -325,7 +331,13 @@ impl TreeNodeRewriter for ExpandAvgRewriter<'_> {
|
||||
|
||||
/// expand tumble in aggr expr to tumble_start and tumble_end with column name like `window_start`
|
||||
#[derive(Debug)]
|
||||
struct TumbleExpandRule;
|
||||
struct TumbleExpandRule {}
|
||||
|
||||
impl TumbleExpandRule {
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
}
|
||||
|
||||
impl AnalyzerRule for TumbleExpandRule {
|
||||
fn analyze(
|
||||
|
||||
@@ -43,7 +43,7 @@ mod utils;
|
||||
#[cfg(test)]
|
||||
mod test_utils;
|
||||
|
||||
pub use adapter::{FlowConfig, FlowStreamingEngineRef, FlownodeOptions, StreamingEngine};
|
||||
pub use adapter::{FlowConfig, FlowStreamingEngine, FlowWorkerManagerRef, FlownodeOptions};
|
||||
pub use batching_mode::frontend_client::{FrontendClient, GrpcQueryHandlerWithBoxedError};
|
||||
pub(crate) use engine::{CreateFlowArgs, FlowId, TableName};
|
||||
pub use error::{Error, Result};
|
||||
|
||||
@@ -52,7 +52,7 @@ use tonic::transport::server::TcpIncoming;
|
||||
use tonic::{Request, Response, Status};
|
||||
|
||||
use crate::adapter::flownode_impl::{FlowDualEngine, FlowDualEngineRef};
|
||||
use crate::adapter::{create_worker, FlowStreamingEngineRef};
|
||||
use crate::adapter::{create_worker, FlowWorkerManagerRef};
|
||||
use crate::batching_mode::engine::BatchingEngine;
|
||||
use crate::engine::FlowEngine;
|
||||
use crate::error::{
|
||||
@@ -63,12 +63,13 @@ use crate::heartbeat::HeartbeatTask;
|
||||
use crate::metrics::{METRIC_FLOW_PROCESSING_TIME, METRIC_FLOW_ROWS};
|
||||
use crate::transform::register_function_to_query_engine;
|
||||
use crate::utils::{SizeReportSender, StateReportHandler};
|
||||
use crate::{CreateFlowArgs, Error, FlownodeOptions, FrontendClient, StreamingEngine};
|
||||
use crate::{CreateFlowArgs, Error, FlowStreamingEngine, FlownodeOptions, FrontendClient};
|
||||
|
||||
pub const FLOW_NODE_SERVER_NAME: &str = "FLOW_NODE_SERVER";
|
||||
/// wrapping flow node manager to avoid orphan rule with Arc<...>
|
||||
#[derive(Clone)]
|
||||
pub struct FlowService {
|
||||
/// TODO(discord9): replace with dual engine
|
||||
pub dual_engine: FlowDualEngineRef,
|
||||
}
|
||||
|
||||
@@ -437,7 +438,6 @@ impl FlownodeBuilder {
|
||||
let cnt = to_be_recovered.len();
|
||||
|
||||
// TODO(discord9): recover in parallel
|
||||
info!("Recovering {} flows: {:?}", cnt, to_be_recovered);
|
||||
for flow_id in to_be_recovered {
|
||||
let info = self
|
||||
.flow_metadata_manager
|
||||
@@ -453,7 +453,6 @@ impl FlownodeBuilder {
|
||||
info.sink_table_name().schema_name.clone(),
|
||||
info.sink_table_name().table_name.clone(),
|
||||
];
|
||||
|
||||
let args = CreateFlowArgs {
|
||||
flow_id: flow_id as _,
|
||||
sink_table_name,
|
||||
@@ -467,24 +466,11 @@ impl FlownodeBuilder {
|
||||
comment: Some(info.comment().clone()),
|
||||
sql: info.raw_sql().clone(),
|
||||
flow_options: info.options().clone(),
|
||||
query_ctx: info
|
||||
.query_context()
|
||||
.clone()
|
||||
.map(|ctx| {
|
||||
ctx.try_into()
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)
|
||||
})
|
||||
.transpose()?
|
||||
// or use default QueryContext with catalog_name from info
|
||||
// to keep compatibility with old version
|
||||
.or_else(|| {
|
||||
Some(
|
||||
QueryContextBuilder::default()
|
||||
.current_catalog(info.catalog_name().to_string())
|
||||
.build(),
|
||||
)
|
||||
}),
|
||||
query_ctx: Some(
|
||||
QueryContextBuilder::default()
|
||||
.current_catalog(info.catalog_name().clone())
|
||||
.build(),
|
||||
),
|
||||
};
|
||||
manager
|
||||
.create_flow(args)
|
||||
@@ -503,7 +489,7 @@ impl FlownodeBuilder {
|
||||
async fn build_manager(
|
||||
&mut self,
|
||||
query_engine: Arc<dyn QueryEngine>,
|
||||
) -> Result<StreamingEngine, Error> {
|
||||
) -> Result<FlowStreamingEngine, Error> {
|
||||
let table_meta = self.table_meta.clone();
|
||||
|
||||
register_function_to_query_engine(&query_engine);
|
||||
@@ -512,7 +498,7 @@ impl FlownodeBuilder {
|
||||
|
||||
let node_id = self.opts.node_id.map(|id| id as u32);
|
||||
|
||||
let mut man = StreamingEngine::new(node_id, query_engine, table_meta);
|
||||
let mut man = FlowStreamingEngine::new(node_id, query_engine, table_meta);
|
||||
for worker_id in 0..num_workers {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
@@ -619,7 +605,7 @@ impl FrontendInvoker {
|
||||
}
|
||||
|
||||
pub async fn build_from(
|
||||
flow_streaming_engine: FlowStreamingEngineRef,
|
||||
flow_worker_manager: FlowWorkerManagerRef,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
kv_backend: KvBackendRef,
|
||||
layered_cache_registry: LayeredCacheRegistryRef,
|
||||
@@ -654,7 +640,7 @@ impl FrontendInvoker {
|
||||
node_manager.clone(),
|
||||
));
|
||||
|
||||
let query_engine = flow_streaming_engine.query_engine.clone();
|
||||
let query_engine = flow_worker_manager.query_engine.clone();
|
||||
|
||||
let statement_executor = Arc::new(StatementExecutor::new(
|
||||
catalog_manager.clone(),
|
||||
|
||||
@@ -46,11 +46,7 @@ pub struct ChineseTokenizer;

impl Tokenizer for ChineseTokenizer {
fn tokenize<'a>(&self, text: &'a str) -> Vec<&'a str> {
if text.is_ascii() {
EnglishTokenizer {}.tokenize(text)
} else {
JIEBA.cut(text, false)
}
JIEBA.cut(text, false)
}
}
|
||||
|
||||
|
||||
@@ -66,12 +66,10 @@ use crate::election::postgres::PgElection;
|
||||
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
|
||||
use crate::election::CANDIDATE_LEASE_SECS;
|
||||
use crate::metasrv::builder::MetasrvBuilder;
|
||||
use crate::metasrv::{BackendImpl, Metasrv, MetasrvOptions, SelectTarget, SelectorRef};
|
||||
use crate::node_excluder::NodeExcluderRef;
|
||||
use crate::metasrv::{BackendImpl, Metasrv, MetasrvOptions, SelectorRef};
|
||||
use crate::selector::lease_based::LeaseBasedSelector;
|
||||
use crate::selector::load_based::LoadBasedSelector;
|
||||
use crate::selector::round_robin::RoundRobinSelector;
|
||||
use crate::selector::weight_compute::RegionNumsBasedWeightCompute;
|
||||
use crate::selector::SelectorType;
|
||||
use crate::service::admin;
|
||||
use crate::{error, Result};
|
||||
@@ -296,25 +294,14 @@ pub async fn metasrv_builder(
|
||||
|
||||
let in_memory = Arc::new(MemoryKvBackend::new()) as ResettableKvBackendRef;
|
||||
|
||||
let node_excluder = plugins
|
||||
.get::<NodeExcluderRef>()
|
||||
.unwrap_or_else(|| Arc::new(Vec::new()) as NodeExcluderRef);
|
||||
let selector = if let Some(selector) = plugins.get::<SelectorRef>() {
|
||||
info!("Using selector from plugins");
|
||||
selector
|
||||
} else {
|
||||
let selector = match opts.selector {
|
||||
SelectorType::LoadBased => Arc::new(LoadBasedSelector::new(
|
||||
RegionNumsBasedWeightCompute,
|
||||
node_excluder,
|
||||
)) as SelectorRef,
|
||||
SelectorType::LeaseBased => {
|
||||
Arc::new(LeaseBasedSelector::new(node_excluder)) as SelectorRef
|
||||
}
|
||||
SelectorType::RoundRobin => Arc::new(RoundRobinSelector::new(
|
||||
SelectTarget::Datanode,
|
||||
node_excluder,
|
||||
)) as SelectorRef,
|
||||
SelectorType::LoadBased => Arc::new(LoadBasedSelector::default()) as SelectorRef,
|
||||
SelectorType::LeaseBased => Arc::new(LeaseBasedSelector) as SelectorRef,
|
||||
SelectorType::RoundRobin => Arc::new(RoundRobinSelector::default()) as SelectorRef,
|
||||
};
|
||||
info!(
|
||||
"Using selector from options, selector type: {}",
|
||||
|
||||
@@ -31,7 +31,6 @@ pub mod metasrv;
|
||||
pub mod metrics;
|
||||
#[cfg(feature = "mock")]
|
||||
pub mod mocks;
|
||||
pub mod node_excluder;
|
||||
pub mod procedure;
|
||||
pub mod pubsub;
|
||||
pub mod region;
|
||||
|
||||
@@ -111,11 +111,6 @@ pub struct MetasrvOptions {
|
||||
pub use_memory_store: bool,
|
||||
/// Whether to enable region failover.
|
||||
pub enable_region_failover: bool,
|
||||
/// Whether to allow region failover on local WAL.
|
||||
///
|
||||
/// If it's true, the region failover will be allowed even if the local WAL is used.
|
||||
/// Note that this option is not recommended to be set to true, because it may lead to data loss during failover.
|
||||
pub allow_region_failover_on_local_wal: bool,
|
||||
/// The HTTP server options.
|
||||
pub http: HttpOptions,
|
||||
/// The logging options.
|
||||
@@ -178,7 +173,6 @@ impl Default for MetasrvOptions {
|
||||
selector: SelectorType::default(),
|
||||
use_memory_store: false,
|
||||
enable_region_failover: false,
|
||||
allow_region_failover_on_local_wal: false,
|
||||
http: HttpOptions::default(),
|
||||
logging: LoggingOptions {
|
||||
dir: format!("{METASRV_HOME}/logs"),
|
||||
|
||||
@@ -40,8 +40,7 @@ use common_meta::state_store::KvStateStore;
|
||||
use common_meta::wal_options_allocator::{build_kafka_client, build_wal_options_allocator};
|
||||
use common_procedure::local::{LocalManager, ManagerConfig};
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use common_telemetry::warn;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::cache_invalidator::MetasrvCacheInvalidator;
|
||||
use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef};
|
||||
@@ -191,7 +190,7 @@ impl MetasrvBuilder {
|
||||
|
||||
let meta_peer_client = meta_peer_client
|
||||
.unwrap_or_else(|| build_default_meta_peer_client(&election, &in_memory));
|
||||
let selector = selector.unwrap_or_else(|| Arc::new(LeaseBasedSelector::default()));
|
||||
let selector = selector.unwrap_or_else(|| Arc::new(LeaseBasedSelector));
|
||||
let pushers = Pushers::default();
|
||||
let mailbox = build_mailbox(&kv_backend, &pushers);
|
||||
let procedure_manager = build_procedure_manager(&options, &kv_backend);
|
||||
@@ -235,17 +234,13 @@ impl MetasrvBuilder {
|
||||
))
|
||||
});
|
||||
|
||||
let flow_selector = Arc::new(RoundRobinSelector::new(
|
||||
SelectTarget::Flownode,
|
||||
Arc::new(Vec::new()),
|
||||
)) as SelectorRef;
|
||||
|
||||
let flow_metadata_allocator = {
|
||||
// for now flownode just use round-robin selector
|
||||
let flow_selector = RoundRobinSelector::new(SelectTarget::Flownode);
|
||||
let flow_selector_ctx = selector_ctx.clone();
|
||||
let peer_allocator = Arc::new(FlowPeerAllocator::new(
|
||||
flow_selector_ctx,
|
||||
flow_selector.clone(),
|
||||
Arc::new(flow_selector),
|
||||
));
|
||||
let seq = Arc::new(
|
||||
SequenceBuilder::new(FLOW_ID_SEQ, kv_backend.clone())
|
||||
@@ -277,25 +272,18 @@ impl MetasrvBuilder {
|
||||
},
|
||||
));
|
||||
let peer_lookup_service = Arc::new(MetaPeerLookupService::new(meta_peer_client.clone()));
|
||||
|
||||
if !is_remote_wal && options.enable_region_failover {
ensure!(
options.allow_region_failover_on_local_wal,
error::UnexpectedSnafu {
violated: "Region failover is not supported in the local WAL implementation!
If you want to enable region failover for local WAL, please set `allow_region_failover_on_local_wal` to true.",
}
);
if options.allow_region_failover_on_local_wal {
warn!("Region failover is force enabled in the local WAL implementation! This may lead to data loss during failover!");
return error::UnexpectedSnafu {
violated: "Region failover is not supported in the local WAL implementation!",
}
.fail();
}
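Both sides of this hunk gate region failover on local WAL behind the `allow_region_failover_on_local_wal` flag; they differ in whether the un-flagged case fails outright or only a warning is emitted when the override is set. A condensed, stand-alone sketch of that gating, using a plain `Result<(), String>` instead of the real snafu error types:

```rust
struct Options {
    enable_region_failover: bool,
    allow_region_failover_on_local_wal: bool,
}

fn check_failover(opts: &Options, is_remote_wal: bool) -> Result<(), String> {
    if !is_remote_wal && opts.enable_region_failover {
        if !opts.allow_region_failover_on_local_wal {
            // Refuse outright unless the operator explicitly opts in.
            return Err(
                "Region failover is not supported in the local WAL implementation! \
                 Set `allow_region_failover_on_local_wal` to true to force it."
                    .to_string(),
            );
        }
        // Opted in: proceed, but warn that data loss is possible during failover.
        eprintln!("WARNING: region failover forced on local WAL; data loss is possible during failover");
    }
    Ok(())
}

fn main() {
    let opts = Options { enable_region_failover: true, allow_region_failover_on_local_wal: false };
    assert!(check_failover(&opts, false).is_err());
    assert!(check_failover(&opts, true).is_ok());
}
```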
|
||||
|
||||
let (tx, rx) = RegionSupervisor::channel();
|
||||
let (region_failure_detector_controller, region_supervisor_ticker): (
|
||||
RegionFailureDetectorControllerRef,
|
||||
Option<std::sync::Arc<RegionSupervisorTicker>>,
|
||||
) = if options.enable_region_failover {
|
||||
) = if options.enable_region_failover && is_remote_wal {
|
||||
(
|
||||
Arc::new(RegionFailureDetectorControl::new(tx.clone())) as _,
|
||||
Some(Arc::new(RegionSupervisorTicker::new(
|
||||
@@ -321,7 +309,7 @@ impl MetasrvBuilder {
|
||||
));
|
||||
region_migration_manager.try_start()?;
|
||||
|
||||
let region_failover_handler = if options.enable_region_failover {
|
||||
let region_failover_handler = if options.enable_region_failover && is_remote_wal {
|
||||
let region_supervisor = RegionSupervisor::new(
|
||||
rx,
|
||||
options.failure_detector,
|
||||
@@ -432,7 +420,7 @@ impl MetasrvBuilder {
|
||||
meta_peer_client: meta_peer_client.clone(),
|
||||
selector,
|
||||
// TODO(jeremy): We do not allow configuring the flow selector.
|
||||
flow_selector,
|
||||
flow_selector: Arc::new(RoundRobinSelector::new(SelectTarget::Flownode)),
|
||||
handler_group: RwLock::new(None),
|
||||
handler_group_builder: Mutex::new(Some(handler_group_builder)),
|
||||
election,
|
||||
|
||||
@@ -71,13 +71,4 @@ lazy_static! {
|
||||
/// The remote WAL prune execute counter.
|
||||
pub static ref METRIC_META_REMOTE_WAL_PRUNE_EXECUTE: IntCounterVec =
|
||||
register_int_counter_vec!("greptime_meta_remote_wal_prune_execute", "meta remote wal prune execute", &["topic_name"]).unwrap();
|
||||
/// The migration stage elapsed histogram.
|
||||
pub static ref METRIC_META_REGION_MIGRATION_STAGE_ELAPSED: HistogramVec = register_histogram_vec!(
|
||||
"greptime_meta_region_migration_stage_elapsed",
|
||||
"meta region migration stage elapsed",
|
||||
&["stage"],
|
||||
// 0.01 ~ 1000
|
||||
exponential_buckets(0.01, 10.0, 7).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_meta::DatanodeId;
|
||||
|
||||
pub type NodeExcluderRef = Arc<dyn NodeExcluder>;
|
||||
|
||||
/// [NodeExcluder] is used to help decide whether some nodes should be excluded (out of consideration)
|
||||
/// in certain situations. For example, in some node selectors.
|
||||
pub trait NodeExcluder: Send + Sync {
|
||||
/// Returns the excluded datanode ids.
|
||||
fn excluded_datanode_ids(&self) -> &Vec<DatanodeId>;
|
||||
}
|
||||
|
||||
impl NodeExcluder for Vec<DatanodeId> {
|
||||
fn excluded_datanode_ids(&self) -> &Vec<DatanodeId> {
|
||||
self
|
||||
}
|
||||
}
|
||||
@@ -25,7 +25,7 @@ pub(crate) mod update_metadata;
|
||||
pub(crate) mod upgrade_candidate_region;
|
||||
|
||||
use std::any::Any;
|
||||
use std::fmt::{Debug, Display};
|
||||
use std::fmt::Debug;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
@@ -43,7 +43,7 @@ use common_procedure::error::{
|
||||
Error as ProcedureError, FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu,
|
||||
};
|
||||
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status, StringKey};
|
||||
use common_telemetry::{error, info};
|
||||
use common_telemetry::info;
|
||||
use manager::RegionMigrationProcedureGuard;
|
||||
pub use manager::{
|
||||
RegionMigrationManagerRef, RegionMigrationProcedureTask, RegionMigrationProcedureTracker,
|
||||
@@ -55,10 +55,7 @@ use tokio::time::Instant;
|
||||
|
||||
use self::migration_start::RegionMigrationStart;
|
||||
use crate::error::{self, Result};
|
||||
use crate::metrics::{
|
||||
METRIC_META_REGION_MIGRATION_ERROR, METRIC_META_REGION_MIGRATION_EXECUTE,
|
||||
METRIC_META_REGION_MIGRATION_STAGE_ELAPSED,
|
||||
};
|
||||
use crate::metrics::{METRIC_META_REGION_MIGRATION_ERROR, METRIC_META_REGION_MIGRATION_EXECUTE};
|
||||
use crate::service::mailbox::MailboxRef;
|
||||
|
||||
/// The default timeout for region migration.
|
||||
@@ -106,82 +103,6 @@ impl PersistentContext {
|
||||
}
|
||||
}
|
||||
|
||||
/// Metrics of region migration.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct Metrics {
|
||||
/// Elapsed time of downgrading region and upgrading region.
|
||||
operations_elapsed: Duration,
|
||||
/// Elapsed time of downgrading leader region.
|
||||
downgrade_leader_region_elapsed: Duration,
|
||||
/// Elapsed time of open candidate region.
|
||||
open_candidate_region_elapsed: Duration,
|
||||
/// Elapsed time of upgrade candidate region.
|
||||
upgrade_candidate_region_elapsed: Duration,
|
||||
}
|
||||
|
||||
impl Display for Metrics {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"operations_elapsed: {:?}, downgrade_leader_region_elapsed: {:?}, open_candidate_region_elapsed: {:?}, upgrade_candidate_region_elapsed: {:?}",
|
||||
self.operations_elapsed,
|
||||
self.downgrade_leader_region_elapsed,
|
||||
self.open_candidate_region_elapsed,
|
||||
self.upgrade_candidate_region_elapsed
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Metrics {
|
||||
/// Updates the elapsed time of downgrading region and upgrading region.
|
||||
pub fn update_operations_elapsed(&mut self, elapsed: Duration) {
|
||||
self.operations_elapsed += elapsed;
|
||||
}
|
||||
|
||||
/// Updates the elapsed time of downgrading leader region.
|
||||
pub fn update_downgrade_leader_region_elapsed(&mut self, elapsed: Duration) {
|
||||
self.downgrade_leader_region_elapsed += elapsed;
|
||||
}
|
||||
|
||||
/// Updates the elapsed time of open candidate region.
|
||||
pub fn update_open_candidate_region_elapsed(&mut self, elapsed: Duration) {
|
||||
self.open_candidate_region_elapsed += elapsed;
|
||||
}
|
||||
|
||||
/// Updates the elapsed time of upgrade candidate region.
|
||||
pub fn update_upgrade_candidate_region_elapsed(&mut self, elapsed: Duration) {
|
||||
self.upgrade_candidate_region_elapsed += elapsed;
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Metrics {
|
||||
fn drop(&mut self) {
|
||||
if !self.operations_elapsed.is_zero() {
|
||||
METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
|
||||
.with_label_values(&["operations"])
|
||||
.observe(self.operations_elapsed.as_secs_f64());
|
||||
}
|
||||
|
||||
if !self.downgrade_leader_region_elapsed.is_zero() {
|
||||
METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
|
||||
.with_label_values(&["downgrade_leader_region"])
|
||||
.observe(self.downgrade_leader_region_elapsed.as_secs_f64());
|
||||
}
|
||||
|
||||
if !self.open_candidate_region_elapsed.is_zero() {
|
||||
METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
|
||||
.with_label_values(&["open_candidate_region"])
|
||||
.observe(self.open_candidate_region_elapsed.as_secs_f64());
|
||||
}
|
||||
|
||||
if !self.upgrade_candidate_region_elapsed.is_zero() {
|
||||
METRIC_META_REGION_MIGRATION_STAGE_ELAPSED
|
||||
.with_label_values(&["upgrade_candidate_region"])
|
||||
.observe(self.upgrade_candidate_region_elapsed.as_secs_f64());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// It's shared in each step and available in executing (including retrying).
|
||||
///
|
||||
/// It will be dropped if the procedure runner crashes.
|
||||
@@ -211,8 +132,8 @@ pub struct VolatileContext {
|
||||
leader_region_last_entry_id: Option<u64>,
|
||||
/// The last_entry_id of leader metadata region (Only used for metric engine).
|
||||
leader_region_metadata_last_entry_id: Option<u64>,
|
||||
/// Metrics of region migration.
|
||||
metrics: Metrics,
|
||||
/// Elapsed time of downgrading region and upgrading region.
|
||||
operations_elapsed: Duration,
|
||||
}
|
||||
|
||||
impl VolatileContext {
|
||||
@@ -310,35 +231,12 @@ impl Context {
|
||||
pub fn next_operation_timeout(&self) -> Option<Duration> {
|
||||
self.persistent_ctx
|
||||
.timeout
|
||||
.checked_sub(self.volatile_ctx.metrics.operations_elapsed)
|
||||
.checked_sub(self.volatile_ctx.operations_elapsed)
|
||||
}
|
||||
|
||||
/// Updates operations elapsed.
|
||||
pub fn update_operations_elapsed(&mut self, instant: Instant) {
|
||||
self.volatile_ctx
|
||||
.metrics
|
||||
.update_operations_elapsed(instant.elapsed());
|
||||
}
|
||||
|
||||
/// Updates the elapsed time of downgrading leader region.
|
||||
pub fn update_downgrade_leader_region_elapsed(&mut self, instant: Instant) {
|
||||
self.volatile_ctx
|
||||
.metrics
|
||||
.update_downgrade_leader_region_elapsed(instant.elapsed());
|
||||
}
|
||||
|
||||
/// Updates the elapsed time of open candidate region.
|
||||
pub fn update_open_candidate_region_elapsed(&mut self, instant: Instant) {
|
||||
self.volatile_ctx
|
||||
.metrics
|
||||
.update_open_candidate_region_elapsed(instant.elapsed());
|
||||
}
|
||||
|
||||
/// Updates the elapsed time of upgrade candidate region.
|
||||
pub fn update_upgrade_candidate_region_elapsed(&mut self, instant: Instant) {
|
||||
self.volatile_ctx
|
||||
.metrics
|
||||
.update_upgrade_candidate_region_elapsed(instant.elapsed());
|
||||
self.volatile_ctx.operations_elapsed += instant.elapsed();
|
||||
}
|
||||
|
||||
/// Returns address of meta server.
|
||||
@@ -652,14 +550,6 @@ impl Procedure for RegionMigrationProcedure {
|
||||
.inc();
|
||||
ProcedureError::retry_later(e)
|
||||
} else {
|
||||
error!(
|
||||
e;
|
||||
"Region migration procedure failed, region_id: {}, from_peer: {}, to_peer: {}, {}",
|
||||
self.context.region_id(),
|
||||
self.context.persistent_ctx.from_peer,
|
||||
self.context.persistent_ctx.to_peer,
|
||||
self.context.volatile_ctx.metrics,
|
||||
);
|
||||
METRIC_META_REGION_MIGRATION_ERROR
|
||||
.with_label_values(&[name, "external"])
|
||||
.inc();
|
||||
|
||||
@@ -46,13 +46,7 @@ impl State for CloseDowngradedRegion {
|
||||
let region_id = ctx.region_id();
|
||||
warn!(err; "Failed to close downgraded leader region: {region_id} on datanode {:?}", downgrade_leader_datanode);
|
||||
}
|
||||
info!(
|
||||
"Region migration is finished: region_id: {}, from_peer: {}, to_peer: {}, {}",
|
||||
ctx.region_id(),
|
||||
ctx.persistent_ctx.from_peer,
|
||||
ctx.persistent_ctx.to_peer,
|
||||
ctx.volatile_ctx.metrics,
|
||||
);
|
||||
|
||||
Ok((Box::new(RegionMigrationEnd), Status::done()))
|
||||
}
|
||||
|
||||
|
||||
@@ -54,7 +54,6 @@ impl Default for DowngradeLeaderRegion {
|
||||
#[typetag::serde]
|
||||
impl State for DowngradeLeaderRegion {
|
||||
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
|
||||
let now = Instant::now();
|
||||
// Ensures the `leader_region_lease_deadline` must exist after recovering.
|
||||
ctx.volatile_ctx
|
||||
.set_leader_region_lease_deadline(Duration::from_secs(REGION_LEASE_SECS));
|
||||
@@ -78,7 +77,6 @@ impl State for DowngradeLeaderRegion {
|
||||
}
|
||||
}
|
||||
}
|
||||
ctx.update_downgrade_leader_region_elapsed(now);
|
||||
|
||||
Ok((
|
||||
Box::new(UpgradeCandidateRegion::default()),
|
||||
@@ -350,8 +348,7 @@ mod tests {
|
||||
let env = TestingEnv::new();
|
||||
let mut ctx = env.context_factory().new_context(persistent_context);
|
||||
prepare_table_metadata(&ctx, HashMap::default()).await;
|
||||
ctx.volatile_ctx.metrics.operations_elapsed =
|
||||
ctx.persistent_ctx.timeout + Duration::from_secs(1);
|
||||
ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
|
||||
|
||||
let err = state.downgrade_region(&mut ctx).await.unwrap_err();
|
||||
|
||||
@@ -594,8 +591,7 @@ mod tests {
|
||||
let mut ctx = env.context_factory().new_context(persistent_context);
|
||||
let mailbox_ctx = env.mailbox_context();
|
||||
let mailbox = mailbox_ctx.mailbox().clone();
|
||||
ctx.volatile_ctx.metrics.operations_elapsed =
|
||||
ctx.persistent_ctx.timeout + Duration::from_secs(1);
|
||||
ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
|
||||
|
||||
let (tx, rx) = tokio::sync::mpsc::channel(1);
|
||||
mailbox_ctx
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
use std::any::Any;
|
||||
|
||||
use common_procedure::Status;
|
||||
use common_telemetry::warn;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
@@ -38,15 +37,7 @@ impl RegionMigrationAbort {
|
||||
#[async_trait::async_trait]
|
||||
#[typetag::serde]
|
||||
impl State for RegionMigrationAbort {
|
||||
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
|
||||
warn!(
|
||||
"Region migration is aborted: {}, region_id: {}, from_peer: {}, to_peer: {}, {}",
|
||||
self.reason,
|
||||
ctx.region_id(),
|
||||
ctx.persistent_ctx.from_peer,
|
||||
ctx.persistent_ctx.to_peer,
|
||||
ctx.volatile_ctx.metrics,
|
||||
);
|
||||
async fn next(&mut self, _: &mut Context) -> Result<(Box<dyn State>, Status)> {
|
||||
error::MigrationAbortSnafu {
|
||||
reason: &self.reason,
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::time::Duration;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use api::v1::meta::MailboxMessage;
|
||||
use common_meta::distributed_time_constants::REGION_LEASE_SECS;
|
||||
@@ -24,7 +24,6 @@ use common_procedure::Status;
|
||||
use common_telemetry::info;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tokio::time::Instant;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::handler::HeartbeatMailbox;
|
||||
@@ -43,9 +42,7 @@ pub struct OpenCandidateRegion;
|
||||
impl State for OpenCandidateRegion {
|
||||
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
|
||||
let instruction = self.build_open_region_instruction(ctx).await?;
|
||||
let now = Instant::now();
|
||||
self.open_candidate_region(ctx, instruction).await?;
|
||||
ctx.update_open_candidate_region_elapsed(now);
|
||||
|
||||
Ok((
|
||||
Box::new(UpdateMetadata::Downgrade),
|
||||
|
||||
@@ -54,12 +54,9 @@ impl Default for UpgradeCandidateRegion {
|
||||
#[typetag::serde]
|
||||
impl State for UpgradeCandidateRegion {
|
||||
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
|
||||
let now = Instant::now();
|
||||
if self.upgrade_region_with_retry(ctx).await {
|
||||
ctx.update_upgrade_candidate_region_elapsed(now);
|
||||
Ok((Box::new(UpdateMetadata::Upgrade), Status::executing(false)))
|
||||
} else {
|
||||
ctx.update_upgrade_candidate_region_elapsed(now);
|
||||
Ok((Box::new(UpdateMetadata::Rollback), Status::executing(false)))
|
||||
}
|
||||
}
|
||||
@@ -291,8 +288,7 @@ mod tests {
|
||||
let persistent_context = new_persistent_context();
|
||||
let env = TestingEnv::new();
|
||||
let mut ctx = env.context_factory().new_context(persistent_context);
|
||||
ctx.volatile_ctx.metrics.operations_elapsed =
|
||||
ctx.persistent_ctx.timeout + Duration::from_secs(1);
|
||||
ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
|
||||
|
||||
let err = state.upgrade_region(&ctx).await.unwrap_err();
|
||||
|
||||
@@ -562,8 +558,7 @@ mod tests {
|
||||
let mut ctx = env.context_factory().new_context(persistent_context);
|
||||
let mailbox_ctx = env.mailbox_context();
|
||||
let mailbox = mailbox_ctx.mailbox().clone();
|
||||
ctx.volatile_ctx.metrics.operations_elapsed =
|
||||
ctx.persistent_ctx.timeout + Duration::from_secs(1);
|
||||
ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
|
||||
|
||||
let (tx, rx) = tokio::sync::mpsc::channel(1);
|
||||
mailbox_ctx
|
||||
|
||||
@@ -335,21 +335,22 @@ impl WalPruneProcedure {
|
||||
})?;
|
||||
partition_client
|
||||
.delete_records(
|
||||
// Note: no "+1" is needed here because the offset argument is exclusive; this is defensive programming in case an off-by-one error exists elsewhere. See https://kafka.apache.org/36/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html#endOffsets(java.util.Collection), which we use to get the end offset from the high watermark.
|
||||
self.data.prunable_entry_id as i64,
|
||||
(self.data.prunable_entry_id + 1) as i64,
|
||||
DELETE_RECORDS_TIMEOUT.as_millis() as i32,
|
||||
)
|
||||
.await
|
||||
.context(DeleteRecordsSnafu {
|
||||
topic: &self.data.topic,
|
||||
partition: DEFAULT_PARTITION,
|
||||
offset: self.data.prunable_entry_id,
|
||||
offset: (self.data.prunable_entry_id + 1),
|
||||
})
|
||||
.map_err(BoxedError::new)
|
||||
.with_context(|_| error::RetryLaterWithSourceSnafu {
|
||||
reason: format!(
|
||||
"Failed to delete records for topic: {}, partition: {}, offset: {}",
|
||||
self.data.topic, DEFAULT_PARTITION, self.data.prunable_entry_id
|
||||
self.data.topic,
|
||||
DEFAULT_PARTITION,
|
||||
self.data.prunable_entry_id + 1
|
||||
),
|
||||
})?;
|
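To make the exclusive end-offset semantics concrete, here is a stand-alone sketch; the `delete_before` helper and the in-memory `Vec<u64>` log are illustrative only, not the real Kafka client API. Every entry strictly below the given offset is removed and the entry at the offset is kept:

```rust
/// Mimics the exclusive-offset semantics of Kafka's DeleteRecords: entries
/// with an id strictly below `offset` are removed, the entry at `offset` stays.
fn delete_before(log: &mut Vec<u64>, offset: u64) {
    log.retain(|&entry_id| entry_id >= offset);
}

fn main() {
    let mut log: Vec<u64> = (0..10).collect();
    let prunable_entry_id = 6;

    // With an exclusive end offset, passing `prunable_entry_id` keeps the
    // prunable entry itself, while `prunable_entry_id + 1` would prune it too.
    delete_before(&mut log, prunable_entry_id);
    assert_eq!(log, vec![6, 7, 8, 9]);

    delete_before(&mut log, prunable_entry_id + 1);
    assert_eq!(log, vec![7, 8, 9]);
}
```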
||||
info!(
|
||||
@@ -604,19 +605,19 @@ mod tests {
|
||||
// Step 3: Test `on_prune`.
|
||||
let status = procedure.on_prune().await.unwrap();
|
||||
assert_matches!(status, Status::Done { output: None });
|
||||
// Check if the entry ids after(include) `prunable_entry_id` still exist.
|
||||
// Check if the entry ids after `prunable_entry_id` still exist.
|
||||
check_entry_id_existence(
|
||||
procedure.context.client.clone(),
|
||||
&topic_name,
|
||||
procedure.data.prunable_entry_id as i64 + 1,
|
||||
true,
|
||||
)
|
||||
.await;
|
||||
// Check if the entry ids before `prunable_entry_id` are deleted.
|
||||
check_entry_id_existence(
|
||||
procedure.context.client.clone(),
|
||||
&topic_name,
|
||||
procedure.data.prunable_entry_id as i64,
|
||||
true,
|
||||
)
|
||||
.await;
|
||||
// Check if the entry ids before `prunable_entry_id` are deleted.
|
||||
check_entry_id_existence(
|
||||
procedure.context.client.clone(),
|
||||
&topic_name,
|
||||
procedure.data.prunable_entry_id as i64 - 1,
|
||||
false,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::collections::HashSet;
|
||||
use std::fmt::Debug;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Duration;
|
||||
@@ -25,7 +25,7 @@ use common_meta::leadership_notifier::LeadershipChangeListener;
|
||||
use common_meta::peer::PeerLookupServiceRef;
|
||||
use common_meta::DatanodeId;
|
||||
use common_runtime::JoinHandle;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
use common_telemetry::{error, info, warn};
|
||||
use common_time::util::current_time_millis;
|
||||
use error::Error::{LeaderPeerChanged, MigrationRunning, TableRouteNotFound};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
@@ -208,8 +208,6 @@ pub const DEFAULT_TICK_INTERVAL: Duration = Duration::from_secs(1);
|
||||
pub struct RegionSupervisor {
|
||||
/// Used to detect the failure of regions.
|
||||
failure_detector: RegionFailureDetector,
|
||||
/// Tracks the number of failovers for each region.
|
||||
failover_counts: HashMap<DetectingRegion, u32>,
|
||||
/// Receives [Event]s.
|
||||
receiver: Receiver<Event>,
|
||||
/// The context of [`SelectorRef`]
|
||||
@@ -295,7 +293,6 @@ impl RegionSupervisor {
|
||||
) -> Self {
|
||||
Self {
|
||||
failure_detector: RegionFailureDetector::new(options),
|
||||
failover_counts: HashMap::new(),
|
||||
receiver: event_receiver,
|
||||
selector_context,
|
||||
selector,
|
||||
@@ -339,14 +336,13 @@ impl RegionSupervisor {
|
||||
}
|
||||
}
|
||||
|
||||
async fn deregister_failure_detectors(&mut self, detecting_regions: Vec<DetectingRegion>) {
|
||||
async fn deregister_failure_detectors(&self, detecting_regions: Vec<DetectingRegion>) {
|
||||
for region in detecting_regions {
|
||||
self.failure_detector.remove(®ion);
|
||||
self.failover_counts.remove(®ion);
|
||||
self.failure_detector.remove(®ion)
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_region_failures(&mut self, mut regions: Vec<(DatanodeId, RegionId)>) {
|
||||
async fn handle_region_failures(&self, mut regions: Vec<(DatanodeId, RegionId)>) {
|
||||
if regions.is_empty() {
|
||||
return;
|
||||
}
|
||||
@@ -369,7 +365,8 @@ impl RegionSupervisor {
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for (datanode_id, region_id) in migrating_regions {
|
||||
debug!(
|
||||
self.failure_detector.remove(&(datanode_id, region_id));
|
||||
warn!(
|
||||
"Removed region failover for region: {region_id}, datanode: {datanode_id} because it's migrating"
|
||||
);
|
||||
}
|
||||
@@ -389,12 +386,7 @@ impl RegionSupervisor {
|
||||
.context(error::MaintenanceModeManagerSnafu)
|
||||
}
|
||||
|
||||
async fn do_failover(&mut self, datanode_id: DatanodeId, region_id: RegionId) -> Result<()> {
|
||||
let count = *self
|
||||
.failover_counts
|
||||
.entry((datanode_id, region_id))
|
||||
.and_modify(|count| *count += 1)
|
||||
.or_insert(1);
|
||||
async fn do_failover(&self, datanode_id: DatanodeId, region_id: RegionId) -> Result<()> {
|
||||
let from_peer = self
|
||||
.peer_lookup
|
||||
.datanode(datanode_id)
|
||||
@@ -423,14 +415,11 @@ impl RegionSupervisor {
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
info!(
|
||||
"Failover for region: {region_id}, from_peer: {from_peer}, to_peer: {to_peer}, tries: {count}"
|
||||
);
|
||||
let task = RegionMigrationProcedureTask {
|
||||
region_id,
|
||||
from_peer,
|
||||
to_peer,
|
||||
timeout: DEFAULT_REGION_MIGRATION_TIMEOUT * count,
|
||||
timeout: DEFAULT_REGION_MIGRATION_TIMEOUT,
|
||||
};
|
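The failover-count bookkeeping above pairs each retry with a longer migration timeout (`DEFAULT_REGION_MIGRATION_TIMEOUT * count`). A minimal sketch of that escalation, with illustrative stand-ins for the real constant and key type:

```rust
use std::collections::HashMap;
use std::time::Duration;

// Illustrative stand-ins for `DEFAULT_REGION_MIGRATION_TIMEOUT` and
// `DetectingRegion`; the real values live in the supervisor module.
const BASE_TIMEOUT: Duration = Duration::from_secs(300);
type DetectingRegion = (u64, u64); // (datanode_id, region_id)

/// Bumps the failover counter for a region and returns the escalated timeout
/// for this attempt: `BASE_TIMEOUT * tries`.
fn next_failover_timeout(
    counts: &mut HashMap<DetectingRegion, u32>,
    region: DetectingRegion,
) -> Duration {
    let count = *counts.entry(region).and_modify(|c| *c += 1).or_insert(1);
    BASE_TIMEOUT * count
}

fn main() {
    let mut counts = HashMap::new();
    let region = (1, 42);
    assert_eq!(next_failover_timeout(&mut counts, region), BASE_TIMEOUT);
    // The second failover of the same region gets twice the budget.
    assert_eq!(next_failover_timeout(&mut counts, region), BASE_TIMEOUT * 2);
    // A successful migration should clear the counter again (not shown here).
}
```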
||||
|
||||
if let Err(err) = self.region_migration_manager.submit_procedure(task).await {
|
||||
@@ -444,8 +433,7 @@ impl RegionSupervisor {
|
||||
Ok(())
|
||||
}
|
||||
TableRouteNotFound { .. } => {
|
||||
self.deregister_failure_detectors(vec![(datanode_id, region_id)])
|
||||
.await;
|
||||
self.failure_detector.remove(&(datanode_id, region_id));
|
||||
info!(
|
||||
"Table route is not found, the table is dropped, removed failover detector for region: {}, datanode: {}",
|
||||
region_id, datanode_id
|
||||
@@ -453,8 +441,7 @@ impl RegionSupervisor {
|
||||
Ok(())
|
||||
}
|
||||
LeaderPeerChanged { .. } => {
|
||||
self.deregister_failure_detectors(vec![(datanode_id, region_id)])
|
||||
.await;
|
||||
self.failure_detector.remove(&(datanode_id, region_id));
|
||||
info!(
|
||||
"Region's leader peer changed, removed failover detector for region: {}, datanode: {}",
|
||||
region_id, datanode_id
|
||||
|
||||
@@ -18,7 +18,7 @@ pub mod load_based;
|
||||
pub mod round_robin;
|
||||
#[cfg(test)]
|
||||
pub(crate) mod test_utils;
|
||||
pub mod weight_compute;
|
||||
mod weight_compute;
|
||||
pub mod weighted_choose;
|
||||
use std::collections::HashSet;
|
||||
|
||||
|
||||
@@ -12,37 +12,17 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_meta::peer::Peer;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::lease;
|
||||
use crate::metasrv::SelectorContext;
|
||||
use crate::node_excluder::NodeExcluderRef;
|
||||
use crate::selector::common::{choose_items, filter_out_excluded_peers};
|
||||
use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedItem};
|
||||
use crate::selector::{Selector, SelectorOptions};
|
||||
|
||||
/// Selects all alive datanodes using a random weighted choose.
|
||||
pub struct LeaseBasedSelector {
|
||||
node_excluder: NodeExcluderRef,
|
||||
}
|
||||
|
||||
impl LeaseBasedSelector {
|
||||
pub fn new(node_excluder: NodeExcluderRef) -> Self {
|
||||
Self { node_excluder }
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for LeaseBasedSelector {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
node_excluder: Arc::new(Vec::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
pub struct LeaseBasedSelector;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Selector for LeaseBasedSelector {
|
||||
@@ -67,14 +47,7 @@ impl Selector for LeaseBasedSelector {
|
||||
.collect();
|
||||
|
||||
// 3. choose peers by weight_array.
|
||||
let mut exclude_peer_ids = self
|
||||
.node_excluder
|
||||
.excluded_datanode_ids()
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect::<HashSet<_>>();
|
||||
exclude_peer_ids.extend(opts.exclude_peer_ids.iter());
|
||||
filter_out_excluded_peers(&mut weight_array, &exclude_peer_ids);
|
||||
filter_out_excluded_peers(&mut weight_array, &opts.exclude_peer_ids);
|
||||
let mut weighted_choose = RandomWeightedChoose::new(weight_array);
|
||||
let selected = choose_items(&opts, &mut weighted_choose)?;
|
||||
|
||||
|
||||
@@ -12,8 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use common_meta::datanode::{DatanodeStatKey, DatanodeStatValue};
|
||||
use common_meta::key::TableMetadataManager;
|
||||
@@ -27,7 +26,6 @@ use crate::error::{self, Result};
|
||||
use crate::key::{DatanodeLeaseKey, LeaseValue};
|
||||
use crate::lease;
|
||||
use crate::metasrv::SelectorContext;
|
||||
use crate::node_excluder::NodeExcluderRef;
|
||||
use crate::selector::common::{choose_items, filter_out_excluded_peers};
|
||||
use crate::selector::weight_compute::{RegionNumsBasedWeightCompute, WeightCompute};
|
||||
use crate::selector::weighted_choose::RandomWeightedChoose;
|
||||
@@ -35,15 +33,11 @@ use crate::selector::{Selector, SelectorOptions};
|
||||
|
||||
pub struct LoadBasedSelector<C> {
|
||||
weight_compute: C,
|
||||
node_excluder: NodeExcluderRef,
|
||||
}
|
||||
|
||||
impl<C> LoadBasedSelector<C> {
|
||||
pub fn new(weight_compute: C, node_excluder: NodeExcluderRef) -> Self {
|
||||
Self {
|
||||
weight_compute,
|
||||
node_excluder,
|
||||
}
|
||||
pub fn new(weight_compute: C) -> Self {
|
||||
Self { weight_compute }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,7 +45,6 @@ impl Default for LoadBasedSelector<RegionNumsBasedWeightCompute> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
weight_compute: RegionNumsBasedWeightCompute,
|
||||
node_excluder: Arc::new(Vec::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -95,14 +88,7 @@ where
|
||||
let mut weight_array = self.weight_compute.compute(&stat_kvs);
|
||||
|
||||
// 5. choose peers by weight_array.
|
||||
let mut exclude_peer_ids = self
|
||||
.node_excluder
|
||||
.excluded_datanode_ids()
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect::<HashSet<_>>();
|
||||
exclude_peer_ids.extend(opts.exclude_peer_ids.iter());
|
||||
filter_out_excluded_peers(&mut weight_array, &exclude_peer_ids);
|
||||
filter_out_excluded_peers(&mut weight_array, &opts.exclude_peer_ids);
|
||||
let mut weighted_choose = RandomWeightedChoose::new(weight_array);
|
||||
let selected = choose_items(&opts, &mut weighted_choose)?;
|
||||
|
||||
|
||||
@@ -12,9 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_meta::peer::Peer;
|
||||
use snafu::ensure;
|
||||
@@ -22,7 +20,6 @@ use snafu::ensure;
|
||||
use crate::error::{NoEnoughAvailableNodeSnafu, Result};
|
||||
use crate::lease;
|
||||
use crate::metasrv::{SelectTarget, SelectorContext};
|
||||
use crate::node_excluder::NodeExcluderRef;
|
||||
use crate::selector::{Selector, SelectorOptions};
|
||||
|
||||
/// Round-robin selector that returns the next peer in the list in sequence.
|
||||
@@ -35,7 +32,6 @@ use crate::selector::{Selector, SelectorOptions};
|
||||
pub struct RoundRobinSelector {
|
||||
select_target: SelectTarget,
|
||||
counter: AtomicUsize,
|
||||
node_excluder: NodeExcluderRef,
|
||||
}
|
||||
|
||||
impl Default for RoundRobinSelector {
|
||||
@@ -43,38 +39,32 @@ impl Default for RoundRobinSelector {
|
||||
Self {
|
||||
select_target: SelectTarget::Datanode,
|
||||
counter: AtomicUsize::new(0),
|
||||
node_excluder: Arc::new(Vec::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RoundRobinSelector {
|
||||
pub fn new(select_target: SelectTarget, node_excluder: NodeExcluderRef) -> Self {
|
||||
pub fn new(select_target: SelectTarget) -> Self {
|
||||
Self {
|
||||
select_target,
|
||||
node_excluder,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_peers(&self, opts: &SelectorOptions, ctx: &SelectorContext) -> Result<Vec<Peer>> {
|
||||
async fn get_peers(
|
||||
&self,
|
||||
min_required_items: usize,
|
||||
ctx: &SelectorContext,
|
||||
) -> Result<Vec<Peer>> {
|
||||
let mut peers = match self.select_target {
|
||||
SelectTarget::Datanode => {
|
||||
// 1. get alive datanodes.
|
||||
let lease_kvs =
|
||||
lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
|
||||
|
||||
let mut exclude_peer_ids = self
|
||||
.node_excluder
|
||||
.excluded_datanode_ids()
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect::<HashSet<_>>();
|
||||
exclude_peer_ids.extend(opts.exclude_peer_ids.iter());
|
||||
// 2. map into peers
|
||||
lease_kvs
|
||||
.into_iter()
|
||||
.filter(|(k, _)| !exclude_peer_ids.contains(&k.node_id))
|
||||
.map(|(k, v)| Peer::new(k.node_id, v.node_addr))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
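The exclusion step above merges the statically configured excluded datanode ids with the per-request `exclude_peer_ids` before any selection happens. A small stand-alone sketch of that filter (the `Peer` struct here is a simplified stand-in for the real type):

```rust
use std::collections::HashSet;

#[derive(Debug, Clone, PartialEq)]
struct Peer {
    id: u64,
    addr: String,
}

/// Merges the statically excluded datanode ids with the per-request
/// exclusions and drops matching peers, mirroring the filter step above.
fn filter_peers(
    peers: Vec<Peer>,
    static_excluded: &[u64],
    request_excluded: &HashSet<u64>,
) -> Vec<Peer> {
    let mut excluded: HashSet<u64> = static_excluded.iter().cloned().collect();
    excluded.extend(request_excluded.iter().cloned());
    peers.into_iter().filter(|p| !excluded.contains(&p.id)).collect()
}

fn main() {
    let peers = vec![
        Peer { id: 2, addr: "node1".into() },
        Peer { id: 5, addr: "node2".into() },
        Peer { id: 8, addr: "node3".into() },
    ];
    // Node 5 is excluded statically, node 2 by the request; only node 8 remains,
    // matching the round-robin selector test at the end of this section.
    let selected = filter_peers(peers, &[5], &HashSet::from([2]));
    assert_eq!(selected.len(), 1);
    assert_eq!(selected[0].id, 8);
}
```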
||||
@@ -94,8 +84,8 @@ impl RoundRobinSelector {
|
||||
ensure!(
|
||||
!peers.is_empty(),
|
||||
NoEnoughAvailableNodeSnafu {
|
||||
required: opts.min_required_items,
|
||||
available: peers.len(),
|
||||
required: min_required_items,
|
||||
available: 0usize,
|
||||
select_target: self.select_target
|
||||
}
|
||||
);
|
||||
@@ -113,7 +103,7 @@ impl Selector for RoundRobinSelector {
|
||||
type Output = Vec<Peer>;
|
||||
|
||||
async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Vec<Peer>> {
|
||||
let peers = self.get_peers(&opts, ctx).await?;
|
||||
let peers = self.get_peers(opts.min_required_items, ctx).await?;
|
||||
// choose peers
|
||||
let mut selected = Vec::with_capacity(opts.min_required_items);
|
||||
for _ in 0..opts.min_required_items {
|
||||
@@ -186,42 +176,4 @@ mod test {
|
||||
assert_eq!(peers.len(), 2);
|
||||
assert_eq!(peers, vec![peer2.clone(), peer3.clone()]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_round_robin_selector_with_exclude_peer_ids() {
|
||||
let selector = RoundRobinSelector::new(SelectTarget::Datanode, Arc::new(vec![5]));
|
||||
let ctx = create_selector_context();
|
||||
// add three nodes
|
||||
let peer1 = Peer {
|
||||
id: 2,
|
||||
addr: "node1".to_string(),
|
||||
};
|
||||
let peer2 = Peer {
|
||||
id: 5,
|
||||
addr: "node2".to_string(),
|
||||
};
|
||||
let peer3 = Peer {
|
||||
id: 8,
|
||||
addr: "node3".to_string(),
|
||||
};
|
||||
put_datanodes(
|
||||
&ctx.meta_peer_client,
|
||||
vec![peer1.clone(), peer2.clone(), peer3.clone()],
|
||||
)
|
||||
.await;
|
||||
|
||||
let peers = selector
|
||||
.select(
|
||||
&ctx,
|
||||
SelectorOptions {
|
||||
min_required_items: 1,
|
||||
allow_duplication: true,
|
||||
exclude_peer_ids: HashSet::from([2]),
|
||||
},
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(peers.len(), 1);
|
||||
assert_eq!(peers, vec![peer3.clone()]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,13 +18,11 @@ common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-query.workspace = true
|
||||
common-recordbatch.workspace = true
|
||||
common-runtime.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
datafusion.workspace = true
|
||||
datatypes.workspace = true
|
||||
futures-util.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
itertools.workspace = true
|
||||
lazy_static = "1.4"
|
||||
mito2.workspace = true
|
||||
|
||||
@@ -12,49 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use common_telemetry::warn;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// The default flush interval of the metadata region.
|
||||
pub(crate) const DEFAULT_FLUSH_METADATA_REGION_INTERVAL: Duration = Duration::from_secs(30);
|
||||
|
||||
/// Configuration for the metric engine.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct EngineConfig {
|
||||
/// Experimental feature to use sparse primary key encoding.
|
||||
pub experimental_sparse_primary_key_encoding: bool,
|
||||
/// The flush interval of the metadata region.
|
||||
#[serde(
|
||||
with = "humantime_serde",
|
||||
default = "EngineConfig::default_flush_metadata_region_interval"
|
||||
)]
|
||||
pub flush_metadata_region_interval: Duration,
|
||||
}
|
||||
|
||||
impl Default for EngineConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
flush_metadata_region_interval: DEFAULT_FLUSH_METADATA_REGION_INTERVAL,
|
||||
experimental_sparse_primary_key_encoding: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl EngineConfig {
|
||||
fn default_flush_metadata_region_interval() -> Duration {
|
||||
DEFAULT_FLUSH_METADATA_REGION_INTERVAL
|
||||
}
|
||||
|
||||
/// Sanitizes the configuration.
|
||||
pub fn sanitize(&mut self) {
|
||||
if self.flush_metadata_region_interval.is_zero() {
|
||||
warn!(
|
||||
"Flush metadata region interval is zero, override with default value: {:?}. Disable metadata region flush is forbidden.",
|
||||
DEFAULT_FLUSH_METADATA_REGION_INTERVAL
|
||||
);
|
||||
self.flush_metadata_region_interval = DEFAULT_FLUSH_METADATA_REGION_INTERVAL;
|
||||
}
|
||||
}
|
||||
}
|
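A minimal sketch of the sanitize rule above, assuming the same 30-second default; in the real config the field is deserialized with `humantime_serde`, so human-readable strings such as "30s" are accepted:

```rust
use std::time::Duration;

// Illustrative copy of the default; the real constant is
// `DEFAULT_FLUSH_METADATA_REGION_INTERVAL`.
const DEFAULT_INTERVAL: Duration = Duration::from_secs(30);

struct EngineConfig {
    flush_metadata_region_interval: Duration,
}

impl EngineConfig {
    /// A zero interval would effectively disable the periodic metadata-region
    /// flush, which is not allowed, so it is clamped back to the default.
    fn sanitize(&mut self) {
        if self.flush_metadata_region_interval.is_zero() {
            self.flush_metadata_region_interval = DEFAULT_INTERVAL;
        }
    }
}

fn main() {
    let mut cfg = EngineConfig {
        flush_metadata_region_interval: Duration::ZERO,
    };
    cfg.sanitize();
    assert_eq!(cfg.flush_metadata_region_interval, DEFAULT_INTERVAL);
}
```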
||||
|
||||
@@ -34,11 +34,9 @@ use api::region::RegionResponse;
|
||||
use async_trait::async_trait;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_runtime::RepeatedTask;
|
||||
use mito2::engine::MitoEngine;
|
||||
pub(crate) use options::IndexOptions;
|
||||
use snafu::ResultExt;
|
||||
pub(crate) use state::MetricEngineState;
|
||||
use store_api::metadata::RegionMetadataRef;
|
||||
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
|
||||
use store_api::region_engine::{
|
||||
@@ -49,11 +47,11 @@ use store_api::region_engine::{
|
||||
use store_api::region_request::{BatchRegionDdlRequest, RegionRequest};
|
||||
use store_api::storage::{RegionId, ScanRequest, SequenceNumber};
|
||||
|
||||
use self::state::MetricEngineState;
|
||||
use crate::config::EngineConfig;
|
||||
use crate::data_region::DataRegion;
|
||||
use crate::error::{self, Error, Result, StartRepeatedTaskSnafu, UnsupportedRegionRequestSnafu};
|
||||
use crate::error::{self, Result, UnsupportedRegionRequestSnafu};
|
||||
use crate::metadata_region::MetadataRegion;
|
||||
use crate::repeated_task::FlushMetadataRegionTask;
|
||||
use crate::row_modifier::RowModifier;
|
||||
use crate::utils::{self, get_region_statistic};
|
||||
|
||||
@@ -361,32 +359,19 @@ impl RegionEngine for MetricEngine {
|
||||
}
|
||||
|
||||
impl MetricEngine {
|
||||
pub fn try_new(mito: MitoEngine, mut config: EngineConfig) -> Result<Self> {
|
||||
pub fn new(mito: MitoEngine, config: EngineConfig) -> Self {
|
||||
let metadata_region = MetadataRegion::new(mito.clone());
|
||||
let data_region = DataRegion::new(mito.clone());
|
||||
let state = Arc::new(RwLock::default());
|
||||
config.sanitize();
|
||||
let flush_interval = config.flush_metadata_region_interval;
|
||||
let inner = Arc::new(MetricEngineInner {
|
||||
mito: mito.clone(),
|
||||
metadata_region,
|
||||
data_region,
|
||||
state: state.clone(),
|
||||
config,
|
||||
row_modifier: RowModifier::new(),
|
||||
flush_task: RepeatedTask::new(
|
||||
flush_interval,
|
||||
Box::new(FlushMetadataRegionTask {
|
||||
state: state.clone(),
|
||||
mito: mito.clone(),
|
||||
}),
|
||||
),
|
||||
});
|
||||
inner
|
||||
.flush_task
|
||||
.start(common_runtime::global_runtime())
|
||||
.context(StartRepeatedTaskSnafu { name: "flush_task" })?;
|
||||
Ok(Self { inner })
|
||||
Self {
|
||||
inner: Arc::new(MetricEngineInner {
|
||||
mito,
|
||||
metadata_region,
|
||||
data_region,
|
||||
state: RwLock::default(),
|
||||
config,
|
||||
row_modifier: RowModifier::new(),
|
||||
}),
|
||||
}
|
||||
}
|
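The constructor above wires a `RepeatedTask` that periodically flushes metadata regions. A rough, stand-alone sketch of the same idea with a plain tokio interval (the real engine uses `common_runtime::RepeatedTask`, not this loop):

```rust
use std::time::Duration;
use tokio::time::{interval, MissedTickBehavior};

/// Ticks at `flush_interval` and invokes the flush callback on every tick.
async fn run_flush_loop<F: FnMut()>(flush_interval: Duration, mut flush: F) {
    let mut ticker = interval(flush_interval);
    ticker.set_missed_tick_behavior(MissedTickBehavior::Delay);
    loop {
        ticker.tick().await;
        flush();
    }
}

#[tokio::main]
async fn main() {
    // Run a few ticks and stop; in the engine the task lives until shutdown.
    let handle = tokio::spawn(run_flush_loop(Duration::from_millis(100), || {
        println!("flush metadata regions");
    }));
    tokio::time::sleep(Duration::from_millis(350)).await;
    handle.abort();
}
```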
||||
|
||||
pub fn mito(&self) -> MitoEngine {
|
||||
@@ -441,21 +426,15 @@ impl MetricEngine {
|
||||
) -> Result<common_recordbatch::SendableRecordBatchStream, BoxedError> {
|
||||
self.inner.scan_to_stream(region_id, request).await
|
||||
}
|
||||
|
||||
/// Returns the configuration of the engine.
|
||||
pub fn config(&self) -> &EngineConfig {
|
||||
&self.inner.config
|
||||
}
|
||||
}
|
||||
|
||||
struct MetricEngineInner {
|
||||
mito: MitoEngine,
|
||||
metadata_region: MetadataRegion,
|
||||
data_region: DataRegion,
|
||||
state: Arc<RwLock<MetricEngineState>>,
|
||||
state: RwLock<MetricEngineState>,
|
||||
config: EngineConfig,
|
||||
row_modifier: RowModifier,
|
||||
flush_task: RepeatedTask<Error>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -737,7 +737,7 @@ mod test {
|
||||
|
||||
// set up
|
||||
let env = TestEnv::new().await;
|
||||
let engine = MetricEngine::try_new(env.mito(), EngineConfig::default()).unwrap();
|
||||
let engine = MetricEngine::new(env.mito(), EngineConfig::default());
|
||||
let engine_inner = engine.inner;
|
||||
|
||||
// check create data region request
|
||||
|
||||
@@ -282,14 +282,6 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start repeated task: {}", name))]
|
||||
StartRepeatedTask {
|
||||
name: String,
|
||||
source: common_runtime::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T, E = Error> = std::result::Result<T, E>;
|
||||
@@ -343,8 +335,6 @@ impl ErrorExt for Error {
|
||||
|
||||
CollectRecordBatchStream { source, .. } => source.status_code(),
|
||||
|
||||
StartRepeatedTask { source, .. } => source.status_code(),
|
||||
|
||||
MetricManifestInfo { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -59,7 +59,6 @@ pub mod engine;
|
||||
pub mod error;
|
||||
mod metadata_region;
|
||||
mod metrics;
|
||||
mod repeated_task;
|
||||
pub mod row_modifier;
|
||||
#[cfg(test)]
|
||||
mod test_util;
|
||||
|
||||
@@ -1,167 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::Instant;
|
||||
|
||||
use common_runtime::TaskFunction;
|
||||
use common_telemetry::{debug, error};
|
||||
use mito2::engine::MitoEngine;
|
||||
use store_api::region_engine::{RegionEngine, RegionRole};
|
||||
use store_api::region_request::{RegionFlushRequest, RegionRequest};
|
||||
|
||||
use crate::engine::MetricEngineState;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::utils;
|
||||
|
||||
/// Task to flush metadata regions.
|
||||
///
|
||||
/// This task is used to send flush requests to the metadata regions
|
||||
/// periodically.
|
||||
pub(crate) struct FlushMetadataRegionTask {
|
||||
pub(crate) state: Arc<RwLock<MetricEngineState>>,
|
||||
pub(crate) mito: MitoEngine,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TaskFunction<Error> for FlushMetadataRegionTask {
|
||||
fn name(&self) -> &str {
|
||||
"FlushMetadataRegionTask"
|
||||
}
|
||||
|
||||
async fn call(&mut self) -> Result<()> {
|
||||
let region_ids = {
|
||||
let state = self.state.read().unwrap();
|
||||
state
|
||||
.physical_region_states()
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
let num_region = region_ids.len();
|
||||
let now = Instant::now();
|
||||
for region_id in region_ids {
|
||||
let Some(role) = self.mito.role(region_id) else {
|
||||
continue;
|
||||
};
|
||||
if role == RegionRole::Follower {
|
||||
continue;
|
||||
}
|
||||
let metadata_region_id = utils::to_metadata_region_id(region_id);
|
||||
if let Err(e) = self
|
||||
.mito
|
||||
.handle_request(
|
||||
metadata_region_id,
|
||||
RegionRequest::Flush(RegionFlushRequest {
|
||||
row_group_size: None,
|
||||
}),
|
||||
)
|
||||
.await
|
||||
{
|
||||
error!(e; "Failed to flush metadata region {}", metadata_region_id);
|
||||
}
|
||||
}
|
||||
debug!(
|
||||
"Flushed {} metadata regions, elapsed: {:?}",
|
||||
num_region,
|
||||
now.elapsed()
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::time::Duration;
|
||||
|
||||
use store_api::region_engine::{RegionEngine, RegionManifestInfo};
|
||||
|
||||
use crate::config::{EngineConfig, DEFAULT_FLUSH_METADATA_REGION_INTERVAL};
|
||||
use crate::test_util::TestEnv;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_flush_metadata_region_task() {
|
||||
let env = TestEnv::with_prefix_and_config(
|
||||
"test_flush_metadata_region_task",
|
||||
EngineConfig {
|
||||
flush_metadata_region_interval: Duration::from_millis(100),
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await;
|
||||
env.init_metric_region().await;
|
||||
let engine = env.metric();
|
||||
// Wait for the flush task to run
|
||||
tokio::time::sleep(Duration::from_millis(200)).await;
|
||||
let physical_region_id = env.default_physical_region_id();
|
||||
let stat = engine.region_statistic(physical_region_id).unwrap();
|
||||
|
||||
assert_matches!(
|
||||
stat.manifest,
|
||||
RegionManifestInfo::Metric {
|
||||
metadata_manifest_version: 1,
|
||||
metadata_flushed_entry_id: 1,
|
||||
..
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_flush_metadata_region_task_with_long_interval() {
|
||||
let env = TestEnv::with_prefix_and_config(
|
||||
"test_flush_metadata_region_task_with_long_interval",
|
||||
EngineConfig {
|
||||
flush_metadata_region_interval: Duration::from_secs(60),
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await;
|
||||
env.init_metric_region().await;
|
||||
let engine = env.metric();
|
||||
// Wait for the flush task to run; it should not flush the metadata region
|
||||
tokio::time::sleep(Duration::from_millis(200)).await;
|
||||
let physical_region_id = env.default_physical_region_id();
|
||||
let stat = engine.region_statistic(physical_region_id).unwrap();
|
||||
|
||||
assert_matches!(
|
||||
stat.manifest,
|
||||
RegionManifestInfo::Metric {
|
||||
metadata_manifest_version: 0,
|
||||
metadata_flushed_entry_id: 0,
|
||||
..
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_flush_metadata_region_sanitize() {
|
||||
let env = TestEnv::with_prefix_and_config(
|
||||
"test_flush_metadata_region_sanitize",
|
||||
EngineConfig {
|
||||
flush_metadata_region_interval: Duration::from_secs(0),
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await;
|
||||
let metric = env.metric();
|
||||
let config = metric.config();
|
||||
assert_eq!(
|
||||
config.flush_metadata_region_interval,
|
||||
DEFAULT_FLUSH_METADATA_REGION_INTERVAL
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -54,14 +54,9 @@ impl TestEnv {
|
||||
|
||||
/// Returns a new env with specific `prefix` for test.
|
||||
pub async fn with_prefix(prefix: &str) -> Self {
|
||||
Self::with_prefix_and_config(prefix, EngineConfig::default()).await
|
||||
}
|
||||
|
||||
/// Returns a new env with specific `prefix` and `config` for test.
|
||||
pub async fn with_prefix_and_config(prefix: &str, config: EngineConfig) -> Self {
|
||||
let mut mito_env = MitoTestEnv::with_prefix(prefix);
|
||||
let mito = mito_env.create_engine(MitoConfig::default()).await;
|
||||
let metric = MetricEngine::try_new(mito.clone(), config).unwrap();
|
||||
let metric = MetricEngine::new(mito.clone(), EngineConfig::default());
|
||||
Self {
|
||||
mito_env,
|
||||
mito,
|
||||
@@ -89,7 +84,7 @@ impl TestEnv {
|
||||
.mito_env
|
||||
.create_follower_engine(MitoConfig::default())
|
||||
.await;
|
||||
let metric = MetricEngine::try_new(mito.clone(), EngineConfig::default()).unwrap();
|
||||
let metric = MetricEngine::new(mito.clone(), EngineConfig::default());
|
||||
|
||||
let region_id = self.default_physical_region_id();
|
||||
debug!("opening default physical region: {region_id}");
|
||||
|
||||
@@ -141,6 +141,11 @@ impl<S: LogStore> RegionWorkerLoop<S> {
|
||||
// But the flush is skipped if the memtables are empty, so we should update the `topic_latest_entry_id`
|
||||
// when handling the flush request instead of in `schedule_flush` or `flush_finished`.
|
||||
self.update_topic_latest_entry_id(®ion);
|
||||
info!(
|
||||
"Region {} flush request, high watermark: {}",
|
||||
region_id,
|
||||
region.topic_latest_entry_id.load(Ordering::Relaxed)
|
||||
);
|
||||
|
||||
let reason = if region.is_downgrading() {
|
||||
FlushReason::Downgrading
|
||||
@@ -263,17 +268,15 @@ impl<S: LogStore> RegionWorkerLoop<S> {
|
||||
.store()
|
||||
.high_watermark(®ion.provider)
|
||||
.unwrap_or(0);
|
||||
let topic_last_entry_id = region.topic_latest_entry_id.load(Ordering::Relaxed);
|
||||
|
||||
if high_watermark != 0 && high_watermark > topic_last_entry_id {
|
||||
if high_watermark != 0 {
|
||||
region
|
||||
.topic_latest_entry_id
|
||||
.store(high_watermark, Ordering::Relaxed);
|
||||
info!(
|
||||
"Region {} high watermark updated to {}",
|
||||
region.region_id, high_watermark
|
||||
);
|
||||
}
|
||||
info!(
|
||||
"Region {} high watermark updated to {}",
|
||||
region.region_id, high_watermark
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
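One side of the hunk above guards the store with `high_watermark > topic_last_entry_id`, which keeps the stored entry id monotonically non-decreasing and ignores a zero watermark (no data yet). A compact sketch of that variant, using a bare `AtomicU64` in place of the region field:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

/// Sketch of the monotonic-update guard: the stored entry id only moves
/// forward, and a zero high watermark is ignored.
fn update_topic_latest_entry_id(latest: &AtomicU64, high_watermark: u64) {
    let current = latest.load(Ordering::Relaxed);
    if high_watermark != 0 && high_watermark > current {
        latest.store(high_watermark, Ordering::Relaxed);
    }
}

fn main() {
    let latest = AtomicU64::new(5);
    update_topic_latest_entry_id(&latest, 7);
    assert_eq!(latest.load(Ordering::Relaxed), 7);
    // A stale or zero watermark does not move the id backwards.
    update_topic_latest_entry_id(&latest, 3);
    update_topic_latest_entry_id(&latest, 0);
    assert_eq!(latest.load(Ordering::Relaxed), 7);
}
```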
||||
|
||||
@@ -29,7 +29,7 @@ pub use etl::{
|
||||
DispatchedTo, Pipeline, PipelineExecOutput, PipelineMap,
|
||||
};
|
||||
pub use manager::{
|
||||
pipeline_operator, table, util, IdentityTimeIndex, PipelineContext, PipelineDefinition,
|
||||
PipelineInfo, PipelineRef, PipelineTableRef, PipelineVersion, PipelineWay, SelectInfo,
|
||||
pipeline_operator, table, util, IdentityTimeIndex, PipelineDefinition, PipelineInfo,
|
||||
PipelineRef, PipelineTableRef, PipelineVersion, PipelineWay, SelectInfo,
|
||||
GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME, GREPTIME_INTERNAL_TRACE_PIPELINE_V1_NAME,
|
||||
};
|
||||
|
||||
@@ -26,7 +26,7 @@ use util::to_pipeline_version;
|
||||
use crate::error::{CastTypeSnafu, InvalidCustomTimeIndexSnafu, PipelineMissingSnafu, Result};
|
||||
use crate::etl::value::time::{MS_RESOLUTION, NS_RESOLUTION, S_RESOLUTION, US_RESOLUTION};
|
||||
use crate::table::PipelineTable;
|
||||
use crate::{GreptimePipelineParams, Pipeline, Value};
|
||||
use crate::{Pipeline, Value};
|
||||
|
||||
pub mod pipeline_operator;
|
||||
pub mod table;
|
||||
@@ -104,22 +104,6 @@ impl PipelineDefinition {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PipelineContext<'a> {
|
||||
pub pipeline_definition: &'a PipelineDefinition,
|
||||
pub pipeline_param: &'a GreptimePipelineParams,
|
||||
}
|
||||
|
||||
impl<'a> PipelineContext<'a> {
|
||||
pub fn new(
|
||||
pipeline_definition: &'a PipelineDefinition,
|
||||
pipeline_param: &'a GreptimePipelineParams,
|
||||
) -> Self {
|
||||
Self {
|
||||
pipeline_definition,
|
||||
pipeline_param,
|
||||
}
|
||||
}
|
||||
}
|
||||
pub enum PipelineWay {
|
||||
OtlpLogDirect(Box<SelectInfo>),
|
||||
Pipeline(PipelineDefinition),
|
||||
|
||||
@@ -44,13 +44,13 @@ pub use quantile_aggr::quantile_udaf;
|
||||
pub use resets::Resets;
|
||||
pub use round::Round;
|
||||
|
||||
/// Extracts an array from a `ColumnarValue`.
|
||||
///
|
||||
/// If the `ColumnarValue` is a scalar, it converts it to an array of size 1.
|
||||
pub(crate) fn extract_array(columnar_value: &ColumnarValue) -> Result<ArrayRef, DataFusionError> {
|
||||
match columnar_value {
|
||||
ColumnarValue::Array(array) => Ok(array.clone()),
|
||||
ColumnarValue::Scalar(scalar) => Ok(scalar.to_array_of_size(1)?),
|
||||
if let ColumnarValue::Array(array) = columnar_value {
|
||||
Ok(array.clone())
|
||||
} else {
|
||||
Err(DataFusionError::Execution(
|
||||
"expect array as input, found scalar value".to_string(),
|
||||
))
|
||||
}
|
||||
}
|
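For reference, a small usage sketch of the match-based variant shown above. It relies on DataFusion's `ColumnarValue` and `ScalarValue::to_array_of_size`; broadcasting a scalar to a one-element array lets downstream range functions treat both input kinds uniformly:

```rust
use std::sync::Arc;
use datafusion::arrow::array::{Array, ArrayRef, Int64Array};
use datafusion::common::ScalarValue;
use datafusion::error::DataFusionError;
use datafusion::physical_plan::ColumnarValue;

/// Same idea as `extract_array` above: scalars are broadcast to a
/// one-element array so both cases can be handled uniformly.
fn extract_array(value: &ColumnarValue) -> Result<ArrayRef, DataFusionError> {
    match value {
        ColumnarValue::Array(array) => Ok(array.clone()),
        ColumnarValue::Scalar(scalar) => Ok(scalar.to_array_of_size(1)?),
    }
}

fn main() -> Result<(), DataFusionError> {
    let arr: ArrayRef = Arc::new(Int64Array::from(vec![1, 2, 3]));
    assert_eq!(extract_array(&ColumnarValue::Array(arr))?.len(), 3);

    let scalar = ColumnarValue::Scalar(ScalarValue::Int64(Some(42)));
    assert_eq!(extract_array(&scalar)?.len(), 1);
    Ok(())
}
```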
||||
|
||||
|
||||
@@ -231,7 +231,6 @@ mod test {
|
||||
AvgOverTime::scalar_udf(),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![],
|
||||
vec![
|
||||
Some(49.9999995),
|
||||
Some(45.8618844),
|
||||
@@ -254,7 +253,6 @@ mod test {
|
||||
MinOverTime::scalar_udf(),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![],
|
||||
vec![
|
||||
Some(12.345678),
|
||||
Some(12.345678),
|
||||
@@ -277,7 +275,6 @@ mod test {
|
||||
MaxOverTime::scalar_udf(),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![],
|
||||
vec![
|
||||
Some(87.654321),
|
||||
Some(87.654321),
|
||||
@@ -300,7 +297,6 @@ mod test {
|
||||
SumOverTime::scalar_udf(),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![],
|
||||
vec![
|
||||
Some(99.999999),
|
||||
Some(229.309422),
|
||||
@@ -323,7 +319,6 @@ mod test {
|
||||
CountOverTime::scalar_udf(),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![],
|
||||
vec![
|
||||
Some(2.0),
|
||||
Some(5.0),
|
||||
@@ -346,7 +341,6 @@ mod test {
|
||||
LastOverTime::scalar_udf(),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![],
|
||||
vec![
|
||||
Some(87.654321),
|
||||
Some(70.710678),
|
||||
@@ -369,7 +363,6 @@ mod test {
|
||||
AbsentOverTime::scalar_udf(),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![],
|
||||
vec![
|
||||
None,
|
||||
None,
|
||||
@@ -392,7 +385,6 @@ mod test {
|
||||
PresentOverTime::scalar_udf(),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![],
|
||||
vec![
|
||||
Some(1.0),
|
||||
Some(1.0),
|
||||
@@ -415,7 +407,6 @@ mod test {
|
||||
StdvarOverTime::scalar_udf(),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![],
|
||||
vec![
|
||||
Some(1417.8479276253622),
|
||||
Some(808.999919713209),
|
||||
@@ -451,7 +442,6 @@ mod test {
|
||||
StdvarOverTime::scalar_udf(),
|
||||
RangeArray::from_ranges(ts_array, ranges).unwrap(),
|
||||
RangeArray::from_ranges(values_array, ranges).unwrap(),
|
||||
vec![],
|
||||
vec![Some(0.0), Some(10.559999999999999)],
|
||||
);
|
||||
}
|
||||
@@ -463,7 +453,6 @@ mod test {
|
||||
StddevOverTime::scalar_udf(),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![],
|
||||
vec![
|
||||
Some(37.6543215),
|
||||
Some(28.442923895289123),
|
||||
@@ -499,7 +488,6 @@ mod test {
|
||||
StddevOverTime::scalar_udf(),
|
||||
RangeArray::from_ranges(ts_array, ranges).unwrap(),
|
||||
RangeArray::from_ranges(values_array, ranges).unwrap(),
|
||||
vec![],
|
||||
vec![Some(0.0), Some(3.249615361854384)],
|
||||
);
|
||||
}
|
||||
|
||||
@@ -90,7 +90,6 @@ mod test {
|
||||
Changes::scalar_udf(),
|
||||
ts_array_1,
|
||||
value_array_1,
|
||||
vec![],
|
||||
vec![Some(0.0), Some(3.0), Some(5.0), Some(8.0), None],
|
||||
);
|
||||
|
||||
@@ -102,7 +101,6 @@ mod test {
|
||||
Changes::scalar_udf(),
|
||||
ts_array_2,
|
||||
value_array_2,
|
||||
vec![],
|
||||
vec![Some(0.0), Some(3.0), Some(5.0), Some(9.0), None],
|
||||
);
|
||||
|
||||
@@ -113,7 +111,6 @@ mod test {
|
||||
Changes::scalar_udf(),
|
||||
ts_array_3,
|
||||
value_array_3,
|
||||
vec![],
|
||||
vec![Some(0.0), Some(0.0), Some(1.0), Some(1.0), None],
|
||||
);
|
||||
}
|
||||
|
||||
@@ -74,7 +74,6 @@ mod test {
|
||||
Deriv::scalar_udf(),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![],
|
||||
vec![Some(10.606060606060607), None],
|
||||
);
|
||||
}
|
||||
@@ -100,7 +99,6 @@ mod test {
|
||||
Deriv::scalar_udf(),
|
||||
ts_range_array,
|
||||
value_range_array,
|
||||
vec![],
|
||||
vec![Some(0.0)],
|
||||
);
|
||||
}
|
||||
|
||||
@@ -34,11 +34,11 @@ use std::sync::Arc;
|
||||
|
||||
use datafusion::arrow::array::{Float64Array, TimestampMillisecondArray};
|
||||
use datafusion::arrow::datatypes::TimeUnit;
|
||||
use datafusion::common::{DataFusionError, Result as DfResult};
|
||||
use datafusion::common::DataFusionError;
|
||||
use datafusion::logical_expr::{ScalarUDF, Volatility};
|
||||
use datafusion::physical_plan::ColumnarValue;
|
||||
use datafusion_expr::create_udf;
|
||||
use datatypes::arrow::array::{Array, Int64Array};
|
||||
use datatypes::arrow::array::Array;
|
||||
use datatypes::arrow::datatypes::DataType;
|
||||
|
||||
use crate::extension_plan::Millisecond;
|
||||
@@ -53,7 +53,7 @@ pub type Increase = ExtrapolatedRate<true, false>;
|
||||
/// from <https://github.com/prometheus/prometheus/blob/v0.40.1/promql/functions.go#L66>
|
||||
#[derive(Debug)]
|
||||
pub struct ExtrapolatedRate<const IS_COUNTER: bool, const IS_RATE: bool> {
|
||||
/// Range length in milliseconds.
|
||||
/// Range duration in milliseconds
|
||||
range_length: i64,
|
||||
}
|
||||
|
||||
@@ -63,7 +63,7 @@ impl<const IS_COUNTER: bool, const IS_RATE: bool> ExtrapolatedRate<IS_COUNTER, I
|
||||
Self { range_length }
|
||||
}
|
||||
|
||||
fn scalar_udf_with_name(name: &str) -> ScalarUDF {
|
||||
fn scalar_udf_with_name(name: &str, range_length: i64) -> ScalarUDF {
|
||||
let input_types = vec![
|
||||
// timestamp range vector
|
||||
RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)),
|
||||
@@ -71,8 +71,6 @@ impl<const IS_COUNTER: bool, const IS_RATE: bool> ExtrapolatedRate<IS_COUNTER, I
|
||||
RangeArray::convert_data_type(DataType::Float64),
|
||||
// timestamp vector
|
||||
DataType::Timestamp(TimeUnit::Millisecond, None),
|
||||
// range length
|
||||
DataType::Int64,
|
||||
];
|
||||
|
||||
create_udf(
|
||||
@@ -80,34 +78,12 @@ impl<const IS_COUNTER: bool, const IS_RATE: bool> ExtrapolatedRate<IS_COUNTER, I
|
||||
input_types,
|
||||
DataType::Float64,
|
||||
Volatility::Volatile,
|
||||
Arc::new(move |input: &_| Self::create_function(input)?.calc(input)) as _,
|
||||
Arc::new(move |input: &_| Self::new(range_length).calc(input)) as _,
|
||||
)
|
||||
}
|
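One side of this hunk builds the UDF with `range_length` captured in the closure at plan time instead of reading it from a constant fourth input column at execution time. A toy sketch of that closure-capture pattern, with made-up arithmetic standing in for the extrapolated-rate math:

```rust
/// Returns a "rate" function with `range_length` baked in at construction
/// time, so the returned closure only needs the sample values at call time.
fn make_rate_fn(range_length: i64) -> impl Fn(&[f64]) -> f64 {
    move |samples: &[f64]| {
        // Toy calculation, not the real extrapolation logic.
        let delta = samples.last().unwrap_or(&0.0) - samples.first().unwrap_or(&0.0);
        delta / range_length as f64
    }
}

fn main() {
    let rate = make_rate_fn(5_000);
    let samples = [10.0, 20.0, 60.0];
    assert!((rate(&samples) - 0.01).abs() < 1e-12);
}
```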
||||
|
||||
fn create_function(inputs: &[ColumnarValue]) -> DfResult<Self> {
|
||||
if inputs.len() != 4 {
|
||||
return Err(DataFusionError::Plan(
|
||||
"ExtrapolatedRate function should have 4 inputs".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let range_length_array = extract_array(&inputs[3])?;
|
||||
let range_length = range_length_array
|
||||
.as_any()
|
||||
.downcast_ref::<Int64Array>()
|
||||
.unwrap()
|
||||
.value(0) as i64;
|
||||
|
||||
Ok(Self::new(range_length))
|
||||
}
|
||||
|
||||
/// Input parameters:
|
||||
/// * 0: timestamp range vector
|
||||
/// * 1: value range vector
|
||||
/// * 2: timestamp vector
|
||||
/// * 3: range length, i.e. the range duration in milliseconds. Not used here
|
||||
fn calc(&self, input: &[ColumnarValue]) -> DfResult<ColumnarValue> {
|
||||
assert_eq!(input.len(), 4);
|
||||
fn calc(&self, input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
|
||||
assert_eq!(input.len(), 3);
|
||||
|
||||
// construct matrix from input
|
||||
let ts_array = extract_array(&input[0])?;
|
||||
@@ -232,8 +208,8 @@ impl ExtrapolatedRate<false, false> {
|
||||
"prom_delta"
|
||||
}
|
||||
|
||||
pub fn scalar_udf() -> ScalarUDF {
|
||||
Self::scalar_udf_with_name(Self::name())
|
||||
pub fn scalar_udf(range_length: i64) -> ScalarUDF {
|
||||
Self::scalar_udf_with_name(Self::name(), range_length)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -243,8 +219,8 @@ impl ExtrapolatedRate<true, true> {
|
||||
"prom_rate"
|
||||
}
|
||||
|
||||
pub fn scalar_udf() -> ScalarUDF {
|
||||
Self::scalar_udf_with_name(Self::name())
|
||||
pub fn scalar_udf(range_length: i64) -> ScalarUDF {
|
||||
Self::scalar_udf_with_name(Self::name(), range_length)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -254,8 +230,8 @@ impl ExtrapolatedRate<true, false> {
|
||||
"prom_increase"
|
||||
}
|
||||
|
||||
pub fn scalar_udf() -> ScalarUDF {
|
||||
Self::scalar_udf_with_name(Self::name())
|
||||
pub fn scalar_udf(range_length: i64) -> ScalarUDF {
|
||||
Self::scalar_udf_with_name(Self::name(), range_length)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -295,7 +271,6 @@ mod test {
|
||||
ColumnarValue::Array(Arc::new(ts_range.into_dict())),
|
||||
ColumnarValue::Array(Arc::new(value_range.into_dict())),
|
||||
ColumnarValue::Array(timestamps),
|
||||
ColumnarValue::Array(Arc::new(Int64Array::from(vec![5]))),
|
||||
];
|
||||
let output = extract_array(
|
||||
&ExtrapolatedRate::<IS_COUNTER, IS_RATE>::new(5)
|
||||
|
||||
@@ -22,7 +22,6 @@ use datafusion::arrow::datatypes::TimeUnit;
|
||||
use datafusion::common::DataFusionError;
|
||||
use datafusion::logical_expr::{ScalarUDF, Volatility};
|
||||
use datafusion::physical_plan::ColumnarValue;
|
||||
use datafusion_common::ScalarValue;
|
||||
use datafusion_expr::create_udf;
|
||||
use datatypes::arrow::array::Array;
|
||||
use datatypes::arrow::datatypes::DataType;
|
||||
@@ -63,10 +62,6 @@ impl HoltWinters {
|
||||
vec![
|
||||
RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)),
|
||||
RangeArray::convert_data_type(DataType::Float64),
|
||||
// sf
|
||||
DataType::Float64,
|
||||
// tf
|
||||
DataType::Float64,
|
||||
]
|
||||
}
|
||||
|
||||
@@ -74,39 +69,20 @@ impl HoltWinters {
|
||||
DataType::Float64
|
||||
}
|
||||
|
||||
pub fn scalar_udf() -> ScalarUDF {
|
||||
pub fn scalar_udf(level: f64, trend: f64) -> ScalarUDF {
|
||||
create_udf(
|
||||
Self::name(),
|
||||
Self::input_type(),
|
||||
Self::return_type(),
|
||||
Volatility::Volatile,
|
||||
Arc::new(move |input: &_| Self::create_function(input)?.calc(input)) as _,
|
||||
Arc::new(move |input: &_| Self::new(level, trend).calc(input)) as _,
|
||||
)
|
||||
}
|
||||
|
||||
fn create_function(inputs: &[ColumnarValue]) -> Result<Self, DataFusionError> {
|
||||
if inputs.len() != 4 {
|
||||
return Err(DataFusionError::Plan(
|
||||
"HoltWinters function should have 4 inputs".to_string(),
|
||||
));
|
||||
}
|
||||
let ColumnarValue::Scalar(ScalarValue::Float64(Some(sf))) = inputs[2] else {
|
||||
return Err(DataFusionError::Plan(
|
||||
"HoltWinters function's third input should be a scalar float64".to_string(),
|
||||
));
|
||||
};
|
||||
let ColumnarValue::Scalar(ScalarValue::Float64(Some(tf))) = inputs[3] else {
|
||||
return Err(DataFusionError::Plan(
|
||||
"HoltWinters function's fourth input should be a scalar float64".to_string(),
|
||||
));
|
||||
};
|
||||
Ok(Self::new(sf, tf))
|
||||
}
|
||||
|
||||
fn calc(&self, input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
|
||||
// construct matrix from input.
|
||||
// The third input is the level parameter and the fourth is the trend parameter; both are included in the fields.
|
||||
assert_eq!(input.len(), 4);
|
||||
assert_eq!(input.len(), 2);
|
||||
|
||||
let ts_array = extract_array(&input[0])?;
|
||||
let value_array = extract_array(&input[1])?;
|
||||
@@ -288,13 +264,9 @@ mod tests {
|
||||
let ts_range_array = RangeArray::from_ranges(ts_array, ranges).unwrap();
|
||||
let value_range_array = RangeArray::from_ranges(values_array, ranges).unwrap();
|
||||
simple_range_udf_runner(
|
||||
HoltWinters::scalar_udf(),
|
||||
HoltWinters::scalar_udf(0.5, 0.1),
|
||||
ts_range_array,
|
||||
value_range_array,
|
||||
vec![
|
||||
ScalarValue::Float64(Some(0.5)),
|
||||
ScalarValue::Float64(Some(0.1)),
|
||||
],
|
||||
vec![Some(5.0)],
|
||||
);
|
||||
}
|
||||
@@ -315,13 +287,9 @@ mod tests {
|
||||
let ts_range_array = RangeArray::from_ranges(ts_array, ranges).unwrap();
|
||||
let value_range_array = RangeArray::from_ranges(values_array, ranges).unwrap();
|
||||
simple_range_udf_runner(
|
||||
HoltWinters::scalar_udf(),
|
||||
HoltWinters::scalar_udf(0.5, 0.1),
|
||||
ts_range_array,
|
||||
value_range_array,
|
||||
vec![
|
||||
ScalarValue::Float64(Some(0.5)),
|
||||
ScalarValue::Float64(Some(0.1)),
|
||||
],
|
||||
vec![Some(38.18119566835938)],
|
||||
);
|
||||
}
|
||||
@@ -347,13 +315,9 @@ mod tests {
|
||||
let (ts_range_array, value_range_array) =
|
||||
create_ts_and_value_range_arrays(query, ranges.clone());
|
||||
simple_range_udf_runner(
|
||||
HoltWinters::scalar_udf(),
|
||||
HoltWinters::scalar_udf(0.01, 0.1),
|
||||
ts_range_array,
|
||||
value_range_array,
|
||||
vec![
|
||||
ScalarValue::Float64(Some(0.01)),
|
||||
ScalarValue::Float64(Some(0.1)),
|
||||
],
|
||||
vec![Some(expected)],
|
||||
);
|
||||
}
|
||||
|
||||
@@ -190,7 +190,6 @@ mod test {
|
||||
IDelta::<false>::scalar_udf(),
|
||||
ts_range_array,
|
||||
value_range_array,
|
||||
vec![],
|
||||
vec![Some(1.0), Some(-5.0), None, Some(6.0), None, None],
|
||||
);
|
||||
|
||||
@@ -201,7 +200,6 @@ mod test {
|
||||
IDelta::<true>::scalar_udf(),
|
||||
ts_range_array,
|
||||
value_range_array,
|
||||
vec![],
|
||||
// the second point represent counter reset
|
||||
vec![Some(0.5), Some(0.0), None, Some(3.0), None, None],
|
||||
);
|
||||
|
||||
@@ -22,7 +22,6 @@ use datafusion::arrow::datatypes::TimeUnit;
|
||||
use datafusion::common::DataFusionError;
|
||||
use datafusion::logical_expr::{ScalarUDF, Volatility};
|
||||
use datafusion::physical_plan::ColumnarValue;
|
||||
use datafusion_common::ScalarValue;
|
||||
use datafusion_expr::create_udf;
|
||||
use datatypes::arrow::array::Array;
|
||||
use datatypes::arrow::datatypes::DataType;
|
||||
@@ -45,41 +44,25 @@ impl PredictLinear {
|
||||
"prom_predict_linear"
|
||||
}
|
||||
|
||||
pub fn scalar_udf() -> ScalarUDF {
|
||||
pub fn scalar_udf(t: i64) -> ScalarUDF {
|
||||
let input_types = vec![
|
||||
// time index column
|
||||
RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)),
|
||||
// value column
|
||||
RangeArray::convert_data_type(DataType::Float64),
|
||||
// t
|
||||
DataType::Int64,
|
||||
];
|
||||
create_udf(
|
||||
Self::name(),
|
||||
input_types,
|
||||
DataType::Float64,
|
||||
Volatility::Volatile,
|
||||
Arc::new(move |input: &_| Self::create_function(input)?.predict_linear(input)) as _,
|
||||
Arc::new(move |input: &_| Self::new(t).predict_linear(input)) as _,
|
||||
)
|
||||
}
|
||||
|
||||
fn create_function(inputs: &[ColumnarValue]) -> Result<Self, DataFusionError> {
|
||||
if inputs.len() != 3 {
|
||||
return Err(DataFusionError::Plan(
|
||||
"PredictLinear function should have 3 inputs".to_string(),
|
||||
));
|
||||
}
|
||||
let ColumnarValue::Scalar(ScalarValue::Int64(Some(t))) = inputs[2] else {
|
||||
return Err(DataFusionError::Plan(
|
||||
"PredictLinear function's third input should be a scalar int64".to_string(),
|
||||
));
|
||||
};
|
||||
Ok(Self::new(t))
|
||||
}
|
||||
|
||||
fn predict_linear(&self, input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
|
||||
// construct matrix from input.
|
||||
assert_eq!(input.len(), 3);
|
||||
assert_eq!(input.len(), 2);
|
||||
let ts_array = extract_array(&input[0])?;
|
||||
let value_array = extract_array(&input[1])?;
|
||||
|
||||
@@ -207,10 +190,9 @@ mod test {
|
||||
let ts_array = RangeArray::from_ranges(ts_array, ranges).unwrap();
|
||||
let value_array = RangeArray::from_ranges(values_array, ranges).unwrap();
|
||||
simple_range_udf_runner(
|
||||
PredictLinear::scalar_udf(),
|
||||
PredictLinear::scalar_udf(0),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![ScalarValue::Int64(Some(0))],
|
||||
vec![None, None],
|
||||
);
|
||||
}
|
||||
@@ -219,10 +201,9 @@ mod test {
|
||||
fn calculate_predict_linear_test1() {
|
||||
let (ts_array, value_array) = build_test_range_arrays();
|
||||
simple_range_udf_runner(
|
||||
PredictLinear::scalar_udf(),
|
||||
PredictLinear::scalar_udf(0),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![ScalarValue::Int64(Some(0))],
|
||||
// value at t = 0
|
||||
vec![Some(38.63636363636364)],
|
||||
);
|
||||
@@ -232,10 +213,9 @@ mod test {
|
||||
fn calculate_predict_linear_test2() {
|
||||
let (ts_array, value_array) = build_test_range_arrays();
|
||||
simple_range_udf_runner(
|
||||
PredictLinear::scalar_udf(),
|
||||
PredictLinear::scalar_udf(3000),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![ScalarValue::Int64(Some(3000))],
|
||||
// value at t = 3000
|
||||
vec![Some(31856.818181818187)],
|
||||
);
|
||||
@@ -245,10 +225,9 @@ mod test {
|
||||
fn calculate_predict_linear_test3() {
|
||||
let (ts_array, value_array) = build_test_range_arrays();
|
||||
simple_range_udf_runner(
|
||||
PredictLinear::scalar_udf(),
|
||||
PredictLinear::scalar_udf(4200),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![ScalarValue::Int64(Some(4200))],
|
||||
// value at t = 4200
|
||||
vec![Some(44584.09090909091)],
|
||||
);
|
||||
@@ -258,10 +237,9 @@ mod test {
|
||||
fn calculate_predict_linear_test4() {
|
||||
let (ts_array, value_array) = build_test_range_arrays();
|
||||
simple_range_udf_runner(
|
||||
PredictLinear::scalar_udf(),
|
||||
PredictLinear::scalar_udf(6600),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![ScalarValue::Int64(Some(6600))],
|
||||
// value at t = 6600
|
||||
vec![Some(70038.63636363638)],
|
||||
);
|
||||
@@ -271,10 +249,9 @@ mod test {
|
||||
fn calculate_predict_linear_test5() {
|
||||
let (ts_array, value_array) = build_test_range_arrays();
|
||||
simple_range_udf_runner(
|
||||
PredictLinear::scalar_udf(),
|
||||
PredictLinear::scalar_udf(7800),
|
||||
ts_array,
|
||||
value_array,
|
||||
vec![ScalarValue::Int64(Some(7800))],
|
||||
// value at t = 7800
|
||||
vec![Some(82765.9090909091)],
|
||||
);
|
||||
|
||||
@@ -19,7 +19,6 @@ use datafusion::arrow::datatypes::TimeUnit;
|
||||
use datafusion::common::DataFusionError;
|
||||
use datafusion::logical_expr::{ScalarUDF, Volatility};
|
||||
use datafusion::physical_plan::ColumnarValue;
|
||||
use datafusion_common::ScalarValue;
|
||||
use datafusion_expr::create_udf;
|
||||
use datatypes::arrow::array::Array;
|
||||
use datatypes::arrow::datatypes::DataType;
|
||||
@@ -41,38 +40,22 @@ impl QuantileOverTime {
|
||||
"prom_quantile_over_time"
|
||||
}
|
||||
|
||||
pub fn scalar_udf() -> ScalarUDF {
|
||||
pub fn scalar_udf(quantile: f64) -> ScalarUDF {
|
||||
let input_types = vec![
|
||||
// time index column
|
||||
RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)),
|
||||
// value column
|
||||
RangeArray::convert_data_type(DataType::Float64),
|
||||
// quantile
|
||||
DataType::Float64,
|
||||
];
|
||||
create_udf(
|
||||
Self::name(),
|
||||
input_types,
|
||||
DataType::Float64,
|
||||
Volatility::Volatile,
|
||||
Arc::new(move |input: &_| Self::create_function(input)?.quantile_over_time(input)) as _,
|
||||
Arc::new(move |input: &_| Self::new(quantile).quantile_over_time(input)) as _,
|
||||
)
|
||||
}
|
||||
|
||||
fn create_function(inputs: &[ColumnarValue]) -> Result<Self, DataFusionError> {
|
||||
if inputs.len() != 3 {
|
||||
return Err(DataFusionError::Plan(
|
||||
"QuantileOverTime function should have 3 inputs".to_string(),
|
||||
));
|
||||
}
|
||||
let ColumnarValue::Scalar(ScalarValue::Float64(Some(quantile))) = inputs[2] else {
|
||||
return Err(DataFusionError::Plan(
|
||||
"QuantileOverTime function's third input should be a scalar float64".to_string(),
|
||||
));
|
||||
};
|
||||
Ok(Self::new(quantile))
|
||||
}
|
||||
|
||||
fn quantile_over_time(
|
||||
&self,
|
||||
input: &[ColumnarValue],
|
||||
|
||||
@@ -16,12 +16,10 @@ use std::sync::Arc;

use datafusion::arrow::array::{ArrayRef, AsArray};
use datafusion::common::cast::{as_list_array, as_primitive_array, as_struct_array};
use datafusion::error::{DataFusionError, Result as DfResult};
use datafusion::error::Result as DfResult;
use datafusion::logical_expr::{Accumulator as DfAccumulator, AggregateUDF, Volatility};
use datafusion::physical_plan::expressions::Literal;
use datafusion::prelude::create_udaf;
use datafusion_common::ScalarValue;
use datafusion_expr::function::AccumulatorArgs;
use datatypes::arrow::array::{ListArray, StructArray};
use datatypes::arrow::datatypes::{DataType, Field, Float64Type};

@@ -40,16 +38,16 @@ pub struct QuantileAccumulator {

/// Create a quantile `AggregateUDF` for PromQL quantile operator,
/// which calculates φ-quantile (0 ≤ φ ≤ 1) over dimensions
pub fn quantile_udaf() -> Arc<AggregateUDF> {
pub fn quantile_udaf(q: f64) -> Arc<AggregateUDF> {
Arc::new(create_udaf(
QUANTILE_NAME,
// Input type: (φ, values)
vec![DataType::Float64, DataType::Float64],
// Input type: (values)
vec![DataType::Float64],
// Output type: the φ-quantile
Arc::new(DataType::Float64),
Volatility::Volatile,
// Create the accumulator
Arc::new(QuantileAccumulator::from_args),
Arc::new(move |_| Ok(Box::new(QuantileAccumulator::new(q)))),
// Intermediate state types
Arc::new(vec![DataType::Struct(
vec![Field::new(
@@ -67,40 +65,17 @@ pub fn quantile_udaf() -> Arc<AggregateUDF> {
}

impl QuantileAccumulator {
fn new(q: f64) -> Self {
pub fn new(q: f64) -> Self {
Self {
q,
..Default::default()
}
}

pub fn from_args(args: AccumulatorArgs) -> DfResult<Box<dyn DfAccumulator>> {
if args.exprs.len() != 2 {
return Err(DataFusionError::Plan(
"Quantile function should have 2 inputs".to_string(),
));
}

let q = match &args.exprs[0]
.as_any()
.downcast_ref::<Literal>()
.map(|lit| lit.value())
{
Some(ScalarValue::Float64(Some(q))) => *q,
_ => {
return Err(DataFusionError::Internal(
"Invalid quantile value".to_string(),
))
}
};

Ok(Box::new(Self::new(q)))
}
}

impl DfAccumulator for QuantileAccumulator {
fn update_batch(&mut self, values: &[ArrayRef]) -> DfResult<()> {
let f64_array = values[1].as_primitive::<Float64Type>();
let f64_array = values[0].as_primitive::<Float64Type>();

self.values.extend(f64_array);

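At the call site the only difference is that φ is fixed when the UDAF is constructed, so the aggregate expression takes just the value column (the planner hunk later in this diff used to prepend the φ literal before calling `aggr.call`). A hedged sketch of the new shape, where `col` is datafusion::prelude::col and the column name is made up for illustration:

use datafusion::logical_expr::Expr;
use datafusion::prelude::col;

/// Build `quantile(value)` with φ = 0.9 decided at plan time.
/// Before this change the same expression carried the φ literal as a first argument.
fn quantile_expr() -> Expr {
    let udaf = quantile_udaf(0.9);
    udaf.call(vec![col("greptime_value")])
}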
@@ -187,10 +162,9 @@ mod tests {
|
||||
#[test]
|
||||
fn test_quantile_accumulator_single_value() {
|
||||
let mut accumulator = QuantileAccumulator::new(0.5);
|
||||
let q = create_f64_array(vec![Some(0.5)]);
|
||||
let input = create_f64_array(vec![Some(10.0)]);
|
||||
|
||||
accumulator.update_batch(&[q, input]).unwrap();
|
||||
accumulator.update_batch(&[input]).unwrap();
|
||||
let result = accumulator.evaluate().unwrap();
|
||||
|
||||
assert_eq!(result, ScalarValue::Float64(Some(10.0)));
|
||||
@@ -199,10 +173,9 @@ mod tests {
|
||||
#[test]
|
||||
fn test_quantile_accumulator_multiple_values() {
|
||||
let mut accumulator = QuantileAccumulator::new(0.5);
|
||||
let q = create_f64_array(vec![Some(0.5)]);
|
||||
let input = create_f64_array(vec![Some(1.0), Some(2.0), Some(3.0), Some(4.0), Some(5.0)]);
|
||||
|
||||
accumulator.update_batch(&[q, input]).unwrap();
|
||||
accumulator.update_batch(&[input]).unwrap();
|
||||
let result = accumulator.evaluate().unwrap();
|
||||
|
||||
assert_eq!(result, ScalarValue::Float64(Some(3.0)));
|
||||
@@ -211,10 +184,9 @@ mod tests {
|
||||
#[test]
|
||||
fn test_quantile_accumulator_with_nulls() {
|
||||
let mut accumulator = QuantileAccumulator::new(0.5);
|
||||
let q = create_f64_array(vec![Some(0.5)]);
|
||||
let input = create_f64_array(vec![Some(1.0), None, Some(3.0), Some(4.0), Some(5.0)]);
|
||||
|
||||
accumulator.update_batch(&[q, input]).unwrap();
|
||||
accumulator.update_batch(&[input]).unwrap();
|
||||
|
||||
let result = accumulator.evaluate().unwrap();
|
||||
assert_eq!(result, ScalarValue::Float64(Some(3.0)));
|
||||
@@ -223,12 +195,11 @@ mod tests {
|
||||
#[test]
|
||||
fn test_quantile_accumulator_multiple_batches() {
|
||||
let mut accumulator = QuantileAccumulator::new(0.5);
|
||||
let q = create_f64_array(vec![Some(0.5)]);
|
||||
let input1 = create_f64_array(vec![Some(1.0), Some(2.0)]);
|
||||
let input2 = create_f64_array(vec![Some(3.0), Some(4.0), Some(5.0)]);
|
||||
|
||||
accumulator.update_batch(&[q.clone(), input1]).unwrap();
|
||||
accumulator.update_batch(&[q, input2]).unwrap();
|
||||
accumulator.update_batch(&[input1]).unwrap();
|
||||
accumulator.update_batch(&[input2]).unwrap();
|
||||
|
||||
let result = accumulator.evaluate().unwrap();
|
||||
assert_eq!(result, ScalarValue::Float64(Some(3.0)));
|
||||
@@ -237,33 +208,29 @@ mod tests {
|
||||
#[test]
|
||||
fn test_quantile_accumulator_different_quantiles() {
|
||||
let mut min_accumulator = QuantileAccumulator::new(0.0);
|
||||
let q = create_f64_array(vec![Some(0.0)]);
|
||||
let input = create_f64_array(vec![Some(1.0), Some(2.0), Some(3.0), Some(4.0), Some(5.0)]);
|
||||
min_accumulator.update_batch(&[q, input.clone()]).unwrap();
|
||||
min_accumulator.update_batch(&[input.clone()]).unwrap();
|
||||
assert_eq!(
|
||||
min_accumulator.evaluate().unwrap(),
|
||||
ScalarValue::Float64(Some(1.0))
|
||||
);
|
||||
|
||||
let mut q1_accumulator = QuantileAccumulator::new(0.25);
|
||||
let q = create_f64_array(vec![Some(0.25)]);
|
||||
q1_accumulator.update_batch(&[q, input.clone()]).unwrap();
|
||||
q1_accumulator.update_batch(&[input.clone()]).unwrap();
|
||||
assert_eq!(
|
||||
q1_accumulator.evaluate().unwrap(),
|
||||
ScalarValue::Float64(Some(2.0))
|
||||
);
|
||||
|
||||
let mut q3_accumulator = QuantileAccumulator::new(0.75);
|
||||
let q = create_f64_array(vec![Some(0.75)]);
|
||||
q3_accumulator.update_batch(&[q, input.clone()]).unwrap();
|
||||
q3_accumulator.update_batch(&[input.clone()]).unwrap();
|
||||
assert_eq!(
|
||||
q3_accumulator.evaluate().unwrap(),
|
||||
ScalarValue::Float64(Some(4.0))
|
||||
);
|
||||
|
||||
let mut max_accumulator = QuantileAccumulator::new(1.0);
|
||||
let q = create_f64_array(vec![Some(1.0)]);
|
||||
max_accumulator.update_batch(&[q, input]).unwrap();
|
||||
max_accumulator.update_batch(&[input]).unwrap();
|
||||
assert_eq!(
|
||||
max_accumulator.evaluate().unwrap(),
|
||||
ScalarValue::Float64(Some(5.0))
|
||||
@@ -273,11 +240,10 @@ mod tests {
|
||||
#[test]
|
||||
fn test_quantile_accumulator_size() {
|
||||
let mut accumulator = QuantileAccumulator::new(0.5);
|
||||
let q = create_f64_array(vec![Some(0.5)]);
|
||||
let input = create_f64_array(vec![Some(1.0), Some(2.0), Some(3.0)]);
|
||||
|
||||
let initial_size = accumulator.size();
|
||||
accumulator.update_batch(&[q, input]).unwrap();
|
||||
accumulator.update_batch(&[input]).unwrap();
|
||||
let after_update_size = accumulator.size();
|
||||
|
||||
assert!(after_update_size >= initial_size);
|
||||
@@ -286,16 +252,14 @@ mod tests {
|
||||
#[test]
|
||||
fn test_quantile_accumulator_state_and_merge() -> DfResult<()> {
|
||||
let mut acc1 = QuantileAccumulator::new(0.5);
|
||||
let q = create_f64_array(vec![Some(0.5)]);
|
||||
let input1 = create_f64_array(vec![Some(1.0), Some(2.0)]);
|
||||
acc1.update_batch(&[q, input1])?;
|
||||
acc1.update_batch(&[input1])?;
|
||||
|
||||
let state1 = acc1.state()?;
|
||||
|
||||
let mut acc2 = QuantileAccumulator::new(0.5);
|
||||
let q = create_f64_array(vec![Some(0.5)]);
|
||||
let input2 = create_f64_array(vec![Some(3.0), Some(4.0), Some(5.0)]);
|
||||
acc2.update_batch(&[q, input2])?;
|
||||
acc2.update_batch(&[input2])?;
|
||||
|
||||
let mut struct_builders = vec![];
|
||||
for scalar in &state1 {
|
||||
@@ -316,16 +280,16 @@ mod tests {
|
||||
#[test]
|
||||
fn test_quantile_accumulator_with_extreme_values() {
|
||||
let mut accumulator = QuantileAccumulator::new(0.5);
|
||||
let q = create_f64_array(vec![Some(0.5)]);
|
||||
let input = create_f64_array(vec![Some(f64::MAX), Some(f64::MIN), Some(0.0)]);
|
||||
|
||||
accumulator.update_batch(&[q, input]).unwrap();
|
||||
accumulator.update_batch(&[input]).unwrap();
|
||||
let _result = accumulator.evaluate().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_quantile_udaf_creation() {
|
||||
let udaf = quantile_udaf();
|
||||
let q = 0.5;
|
||||
let udaf = quantile_udaf(q);
|
||||
|
||||
assert_eq!(udaf.name(), QUANTILE_NAME);
|
||||
assert_eq!(udaf.return_type(&[]).unwrap(), DataType::Float64);
|
||||
|
||||
@@ -90,7 +90,6 @@ mod test {
Resets::scalar_udf(),
ts_array_1,
value_array_1,
vec![],
vec![Some(0.0), Some(1.0), Some(2.0), Some(3.0), None],
);

@@ -102,7 +101,6 @@ mod test {
Resets::scalar_udf(),
ts_array_2,
value_array_2,
vec![],
vec![Some(0.0), Some(0.0), Some(1.0), Some(1.0), None],
);

@@ -113,7 +111,6 @@ mod test {
Resets::scalar_udf(),
ts_array_3,
value_array_3,
vec![],
vec![Some(0.0), Some(0.0), Some(0.0), Some(0.0), None],
);
}

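The expected outputs above count counter resets: within each range window, every sample that drops below its predecessor adds one. A stand-alone sketch of that count (illustrative helper, not the crate's implementation):

/// Number of times the series decreases between consecutive samples.
fn resets(values: &[f64]) -> f64 {
    values.windows(2).filter(|pair| pair[1] < pair[0]).count() as f64
}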
@@ -15,7 +15,6 @@
use std::sync::Arc;

use datafusion::error::DataFusionError;
use datafusion_common::ScalarValue;
use datafusion_expr::{create_udf, ColumnarValue, ScalarUDF, Volatility};
use datatypes::arrow::array::AsArray;
use datatypes::arrow::datatypes::{DataType, Float64Type};
@@ -37,39 +36,25 @@ impl Round {
}

fn input_type() -> Vec<DataType> {
vec![DataType::Float64, DataType::Float64]
vec![DataType::Float64]
}

pub fn return_type() -> DataType {
DataType::Float64
}

pub fn scalar_udf() -> ScalarUDF {
pub fn scalar_udf(nearest: f64) -> ScalarUDF {
create_udf(
Self::name(),
Self::input_type(),
Self::return_type(),
Volatility::Volatile,
Arc::new(move |input: &_| Self::create_function(input)?.calc(input)) as _,
Arc::new(move |input: &_| Self::new(nearest).calc(input)) as _,
)
}

fn create_function(inputs: &[ColumnarValue]) -> Result<Self, DataFusionError> {
if inputs.len() != 2 {
return Err(DataFusionError::Plan(
"Round function should have 2 inputs".to_string(),
));
}
let ColumnarValue::Scalar(ScalarValue::Float64(Some(nearest))) = inputs[1] else {
return Err(DataFusionError::Plan(
"Round function's second input should be a scalar float64".to_string(),
));
};
Ok(Self::new(nearest))
}

fn calc(&self, input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
assert_eq!(input.len(), 2);
assert_eq!(input.len(), 1);

let value_array = extract_array(&input[0])?;

@@ -95,11 +80,8 @@ mod tests {
use super::*;

fn test_round_f64(value: Vec<f64>, nearest: f64, expected: Vec<f64>) {
let round_udf = Round::scalar_udf();
let input = vec![
ColumnarValue::Array(Arc::new(Float64Array::from(value))),
ColumnarValue::Scalar(ScalarValue::Float64(Some(nearest))),
];
let round_udf = Round::scalar_udf(nearest);
let input = vec![ColumnarValue::Array(Arc::new(Float64Array::from(value)))];
let args = ScalarFunctionArgs {
args: input,
number_rows: 1,

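The `nearest` parameter follows PromQL's round(): each sample is rounded to the closest multiple of it, and the planner hunk further down supplies the value when the query omits the argument. The per-element arithmetic is a one-liner; a stand-alone sketch (the function name and the handling of nearest == 0 are assumptions, not the crate's behavior):

/// Round `value` to the nearest multiple of `nearest`.
fn round_to_nearest(value: f64, nearest: f64) -> f64 {
    if nearest == 0.0 {
        return f64::NAN; // placeholder; the real UDF's zero handling may differ
    }
    (value / nearest).round() * nearest
}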
@@ -17,7 +17,6 @@ use std::sync::Arc;
use datafusion::arrow::array::Float64Array;
use datafusion::logical_expr::ScalarUDF;
use datafusion::physical_plan::ColumnarValue;
use datafusion_common::ScalarValue;
use datafusion_expr::ScalarFunctionArgs;
use datatypes::arrow::datatypes::DataType;

@@ -29,17 +28,13 @@ pub fn simple_range_udf_runner(
range_fn: ScalarUDF,
input_ts: RangeArray,
input_value: RangeArray,
other_args: Vec<ScalarValue>,
expected: Vec<Option<f64>>,
) {
let num_rows = input_ts.len();
let input = [
let input = vec![
ColumnarValue::Array(Arc::new(input_ts.into_dict())),
ColumnarValue::Array(Arc::new(input_value.into_dict())),
]
.into_iter()
.chain(other_args.into_iter().map(ColumnarValue::Scalar))
.collect::<Vec<_>>();
];
let args = ScalarFunctionArgs {
args: input,
number_rows: num_rows,

@@ -28,7 +28,7 @@ pub mod error;
pub mod executor;
pub mod log_query;
pub mod metrics;
pub mod optimizer;
mod optimizer;
pub mod options;
pub mod parser;
mod part_sort;

@@ -31,7 +31,7 @@ use datafusion::functions_aggregate::stddev::stddev_pop_udaf;
|
||||
use datafusion::functions_aggregate::sum::sum_udaf;
|
||||
use datafusion::functions_aggregate::variance::var_pop_udaf;
|
||||
use datafusion::functions_window::row_number::RowNumber;
|
||||
use datafusion::logical_expr::expr::{Alias, ScalarFunction, WindowFunction};
|
||||
use datafusion::logical_expr::expr::{AggregateFunction, Alias, ScalarFunction, WindowFunction};
|
||||
use datafusion::logical_expr::expr_rewriter::normalize_cols;
|
||||
use datafusion::logical_expr::{
|
||||
BinaryExpr, Cast, Extension, LogicalPlan, LogicalPlanBuilder, Operator,
|
||||
@@ -1425,18 +1425,15 @@ impl PromPlanner {
|
||||
let field_column_pos = 0;
|
||||
let mut exprs = Vec::with_capacity(self.ctx.field_columns.len());
|
||||
let scalar_func = match func.name {
|
||||
"increase" => ScalarFunc::ExtrapolateUdf(
|
||||
Arc::new(Increase::scalar_udf()),
|
||||
"increase" => ScalarFunc::ExtrapolateUdf(Arc::new(Increase::scalar_udf(
|
||||
self.ctx.range.context(ExpectRangeSelectorSnafu)?,
|
||||
),
|
||||
"rate" => ScalarFunc::ExtrapolateUdf(
|
||||
Arc::new(Rate::scalar_udf()),
|
||||
))),
|
||||
"rate" => ScalarFunc::ExtrapolateUdf(Arc::new(Rate::scalar_udf(
|
||||
self.ctx.range.context(ExpectRangeSelectorSnafu)?,
|
||||
),
|
||||
"delta" => ScalarFunc::ExtrapolateUdf(
|
||||
Arc::new(Delta::scalar_udf()),
|
||||
))),
|
||||
"delta" => ScalarFunc::ExtrapolateUdf(Arc::new(Delta::scalar_udf(
|
||||
self.ctx.range.context(ExpectRangeSelectorSnafu)?,
|
||||
),
|
||||
))),
|
||||
"idelta" => ScalarFunc::Udf(Arc::new(IDelta::<false>::scalar_udf())),
|
||||
"irate" => ScalarFunc::Udf(Arc::new(IDelta::<true>::scalar_udf())),
|
||||
"resets" => ScalarFunc::Udf(Arc::new(Resets::scalar_udf())),
|
||||
@@ -1452,9 +1449,50 @@ impl PromPlanner {
|
||||
"present_over_time" => ScalarFunc::Udf(Arc::new(PresentOverTime::scalar_udf())),
|
||||
"stddev_over_time" => ScalarFunc::Udf(Arc::new(StddevOverTime::scalar_udf())),
|
||||
"stdvar_over_time" => ScalarFunc::Udf(Arc::new(StdvarOverTime::scalar_udf())),
|
||||
"quantile_over_time" => ScalarFunc::Udf(Arc::new(QuantileOverTime::scalar_udf())),
|
||||
"predict_linear" => ScalarFunc::Udf(Arc::new(PredictLinear::scalar_udf())),
|
||||
"holt_winters" => ScalarFunc::Udf(Arc::new(HoltWinters::scalar_udf())),
|
||||
"quantile_over_time" => {
|
||||
let quantile_expr = match other_input_exprs.pop_front() {
|
||||
Some(DfExpr::Literal(ScalarValue::Float64(Some(quantile)))) => quantile,
|
||||
other => UnexpectedPlanExprSnafu {
|
||||
desc: format!("expected f64 literal as quantile, but found {:?}", other),
|
||||
}
|
||||
.fail()?,
|
||||
};
|
||||
ScalarFunc::Udf(Arc::new(QuantileOverTime::scalar_udf(quantile_expr)))
|
||||
}
|
||||
"predict_linear" => {
|
||||
let t_expr = match other_input_exprs.pop_front() {
|
||||
Some(DfExpr::Literal(ScalarValue::Float64(Some(t)))) => t as i64,
|
||||
Some(DfExpr::Literal(ScalarValue::Int64(Some(t)))) => t,
|
||||
other => UnexpectedPlanExprSnafu {
|
||||
desc: format!("expected i64 literal as t, but found {:?}", other),
|
||||
}
|
||||
.fail()?,
|
||||
};
|
||||
ScalarFunc::Udf(Arc::new(PredictLinear::scalar_udf(t_expr)))
|
||||
}
|
||||
"holt_winters" => {
|
||||
let sf_exp = match other_input_exprs.pop_front() {
|
||||
Some(DfExpr::Literal(ScalarValue::Float64(Some(sf)))) => sf,
|
||||
other => UnexpectedPlanExprSnafu {
|
||||
desc: format!(
|
||||
"expected f64 literal as smoothing factor, but found {:?}",
|
||||
other
|
||||
),
|
||||
}
|
||||
.fail()?,
|
||||
};
|
||||
let tf_exp = match other_input_exprs.pop_front() {
|
||||
Some(DfExpr::Literal(ScalarValue::Float64(Some(tf)))) => tf,
|
||||
other => UnexpectedPlanExprSnafu {
|
||||
desc: format!(
|
||||
"expected f64 literal as trend factor, but found {:?}",
|
||||
other
|
||||
),
|
||||
}
|
||||
.fail()?,
|
||||
};
|
||||
ScalarFunc::Udf(Arc::new(HoltWinters::scalar_udf(sf_exp, tf_exp)))
|
||||
}
|
||||
"time" => {
|
||||
exprs.push(build_special_time_expr(
|
||||
self.ctx.time_index_column.as_ref().unwrap(),
|
||||
@@ -1589,10 +1627,17 @@ impl PromPlanner {
|
||||
ScalarFunc::GeneratedExpr
|
||||
}
|
||||
"round" => {
|
||||
if other_input_exprs.is_empty() {
|
||||
other_input_exprs.push_front(DfExpr::Literal(ScalarValue::Float64(Some(0.0))));
|
||||
}
|
||||
ScalarFunc::DataFusionUdf(Arc::new(Round::scalar_udf()))
|
||||
let nearest = match other_input_exprs.pop_front() {
|
||||
Some(DfExpr::Literal(ScalarValue::Float64(Some(t)))) => t,
|
||||
Some(DfExpr::Literal(ScalarValue::Int64(Some(t)))) => t as f64,
|
||||
None => 0.0,
|
||||
other => UnexpectedPlanExprSnafu {
|
||||
desc: format!("expected f64 literal as t, but found {:?}", other),
|
||||
}
|
||||
.fail()?,
|
||||
};
|
||||
|
||||
ScalarFunc::DataFusionUdf(Arc::new(Round::scalar_udf(nearest)))
|
||||
}
|
||||
|
||||
_ => {
|
||||
@@ -1650,7 +1695,7 @@ impl PromPlanner {
|
||||
let _ = other_input_exprs.remove(field_column_pos + 1);
|
||||
let _ = other_input_exprs.remove(field_column_pos);
|
||||
}
|
||||
ScalarFunc::ExtrapolateUdf(func, range_length) => {
|
||||
ScalarFunc::ExtrapolateUdf(func) => {
|
||||
let ts_range_expr = DfExpr::Column(Column::from_name(
|
||||
RangeManipulate::build_timestamp_range_name(
|
||||
self.ctx.time_index_column.as_ref().unwrap(),
|
||||
@@ -1660,13 +1705,11 @@ impl PromPlanner {
|
||||
other_input_exprs.insert(field_column_pos + 1, col_expr);
|
||||
other_input_exprs
|
||||
.insert(field_column_pos + 2, self.create_time_index_column_expr()?);
|
||||
other_input_exprs.push_back(lit(range_length));
|
||||
let fn_expr = DfExpr::ScalarFunction(ScalarFunction {
|
||||
func,
|
||||
args: other_input_exprs.clone().into(),
|
||||
});
|
||||
exprs.push(fn_expr);
|
||||
let _ = other_input_exprs.pop_back();
|
||||
let _ = other_input_exprs.remove(field_column_pos + 2);
|
||||
let _ = other_input_exprs.remove(field_column_pos + 1);
|
||||
let _ = other_input_exprs.remove(field_column_pos);
|
||||
@@ -1929,13 +1972,11 @@ impl PromPlanner {
|
||||
param: &Option<Box<PromExpr>>,
|
||||
input_plan: &LogicalPlan,
|
||||
) -> Result<(Vec<DfExpr>, Vec<DfExpr>)> {
|
||||
let mut non_col_args = Vec::new();
|
||||
let aggr = match op.id() {
|
||||
token::T_SUM => sum_udaf(),
|
||||
token::T_QUANTILE => {
|
||||
let q = Self::get_param_value_as_f64(op, param)?;
|
||||
non_col_args.push(lit(q));
|
||||
quantile_udaf()
|
||||
quantile_udaf(q)
|
||||
}
|
||||
token::T_AVG => avg_udaf(),
|
||||
token::T_COUNT_VALUES | token::T_COUNT => count_udaf(),
|
||||
@@ -1957,12 +1998,16 @@ impl PromPlanner {
|
||||
.field_columns
|
||||
.iter()
|
||||
.map(|col| {
|
||||
non_col_args.push(DfExpr::Column(Column::from_name(col)));
|
||||
let expr = aggr.call(non_col_args.clone());
|
||||
non_col_args.pop();
|
||||
expr
|
||||
Ok(DfExpr::AggregateFunction(AggregateFunction {
|
||||
func: aggr.clone(),
|
||||
args: vec![DfExpr::Column(Column::from_name(col))],
|
||||
distinct: false,
|
||||
filter: None,
|
||||
order_by: None,
|
||||
null_treatment: None,
|
||||
}))
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
|
||||
// if the aggregator is `count_values`, it must be grouped by current fields.
|
||||
let prev_field_exprs = if op.id() == token::T_COUNT_VALUES {
|
||||
@@ -2896,8 +2941,7 @@ enum ScalarFunc {
Udf(Arc<ScalarUdfDef>),
// todo(ruihang): maybe merge with Udf later
/// UDF that require extra information like range length to be evaluated.
/// The second argument is range length.
ExtrapolateUdf(Arc<ScalarUdfDef>, i64),
ExtrapolateUdf(Arc<ScalarUdfDef>),
/// Func that doesn't require input, like `time()`.
GeneratedExpr,
}
@@ -3551,8 +3595,8 @@ mod test {
|
||||
async fn increase_aggr() {
|
||||
let query = "increase(some_metric[5m])";
|
||||
let expected = String::from(
|
||||
"Filter: prom_increase(timestamp_range,field_0,timestamp,Int64(300000)) IS NOT NULL [timestamp:Timestamp(Millisecond, None), prom_increase(timestamp_range,field_0,timestamp,Int64(300000)):Float64;N, tag_0:Utf8]\
|
||||
\n Projection: some_metric.timestamp, prom_increase(timestamp_range, field_0, some_metric.timestamp, Int64(300000)) AS prom_increase(timestamp_range,field_0,timestamp,Int64(300000)), some_metric.tag_0 [timestamp:Timestamp(Millisecond, None), prom_increase(timestamp_range,field_0,timestamp,Int64(300000)):Float64;N, tag_0:Utf8]\
|
||||
"Filter: prom_increase(timestamp_range,field_0,timestamp) IS NOT NULL [timestamp:Timestamp(Millisecond, None), prom_increase(timestamp_range,field_0,timestamp):Float64;N, tag_0:Utf8]\
|
||||
\n Projection: some_metric.timestamp, prom_increase(timestamp_range, field_0, some_metric.timestamp) AS prom_increase(timestamp_range,field_0,timestamp), some_metric.tag_0 [timestamp:Timestamp(Millisecond, None), prom_increase(timestamp_range,field_0,timestamp):Float64;N, tag_0:Utf8]\
|
||||
\n PromRangeManipulate: req range=[0..100000000], interval=[5000], eval range=[300000], time index=[timestamp], values=[\"field_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Dictionary(Int64, Float64);N, timestamp_range:Dictionary(Int64, Timestamp(Millisecond, None))]\
|
||||
\n PromSeriesNormalize: offset=[0], time index=[timestamp], filter NaN: [true] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
|
||||
\n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\
|
||||
@@ -4351,8 +4395,8 @@ mod test {
|
||||
let plan = PromPlanner::stmt_to_plan(table_provider, &eval_stmt, &build_session_state())
|
||||
.await
|
||||
.unwrap();
|
||||
let expected = "Sort: prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST [greptime_timestamp:Timestamp(Millisecond, None), quantile(Float64(0.3),sum(prometheus_tsdb_head_series.greptime_value)):Float64;N]\
|
||||
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.greptime_timestamp]], aggr=[[quantile(Float64(0.3), sum(prometheus_tsdb_head_series.greptime_value))]] [greptime_timestamp:Timestamp(Millisecond, None), quantile(Float64(0.3),sum(prometheus_tsdb_head_series.greptime_value)):Float64;N]\
|
||||
let expected = "Sort: prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST [greptime_timestamp:Timestamp(Millisecond, None), quantile(sum(prometheus_tsdb_head_series.greptime_value)):Float64;N]\
|
||||
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.greptime_timestamp]], aggr=[[quantile(sum(prometheus_tsdb_head_series.greptime_value))]] [greptime_timestamp:Timestamp(Millisecond, None), quantile(sum(prometheus_tsdb_head_series.greptime_value)):Float64;N]\
|
||||
\n Sort: prometheus_tsdb_head_series.ip ASC NULLS LAST, prometheus_tsdb_head_series.greptime_timestamp ASC NULLS LAST [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(prometheus_tsdb_head_series.greptime_value):Float64;N]\
|
||||
\n Aggregate: groupBy=[[prometheus_tsdb_head_series.ip, prometheus_tsdb_head_series.greptime_timestamp]], aggr=[[sum(prometheus_tsdb_head_series.greptime_value)]] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(prometheus_tsdb_head_series.greptime_value):Float64;N]\
|
||||
\n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp] [ip:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\
|
||||
|
||||
@@ -1 +1 @@
v0.9.0
v0.8.0

@@ -33,9 +33,7 @@ use crate::error::{
|
||||
status_code_to_http_status, InvalidElasticsearchInputSnafu, ParseJsonSnafu, PipelineSnafu,
|
||||
Result as ServersResult,
|
||||
};
|
||||
use crate::http::event::{
|
||||
ingest_logs_inner, LogIngesterQueryParams, LogState, PipelineIngestRequest,
|
||||
};
|
||||
use crate::http::event::{ingest_logs_inner, LogIngestRequest, LogIngesterQueryParams, LogState};
|
||||
use crate::metrics::{
|
||||
METRIC_ELASTICSEARCH_LOGS_DOCS_COUNT, METRIC_ELASTICSEARCH_LOGS_INGESTION_ELAPSED,
|
||||
};
|
||||
@@ -278,7 +276,7 @@ fn parse_bulk_request(
|
||||
input: &str,
|
||||
index_from_url: &Option<String>,
|
||||
msg_field: &Option<String>,
|
||||
) -> ServersResult<Vec<PipelineIngestRequest>> {
|
||||
) -> ServersResult<Vec<LogIngestRequest>> {
|
||||
// Read the ndjson payload and convert it to `Vec<Value>`. Return error if the input is not a valid JSON.
|
||||
let values: Vec<Value> = Deserializer::from_str(input)
|
||||
.into_iter::<Value>()
|
||||
@@ -293,7 +291,7 @@ fn parse_bulk_request(
|
||||
}
|
||||
);
|
||||
|
||||
let mut requests: Vec<PipelineIngestRequest> = Vec::with_capacity(values.len() / 2);
|
||||
let mut requests: Vec<LogIngestRequest> = Vec::with_capacity(values.len() / 2);
|
||||
let mut values = values.into_iter();
|
||||
|
||||
// Read the ndjson payload and convert it to a (index, value) vector.
|
||||
@@ -333,7 +331,7 @@ fn parse_bulk_request(
|
||||
);
|
||||
|
||||
let log_value = pipeline::json_to_map(log_value).context(PipelineSnafu)?;
|
||||
requests.push(PipelineIngestRequest {
|
||||
requests.push(LogIngestRequest {
|
||||
table: index.unwrap_or_else(|| index_from_url.as_ref().unwrap().clone()),
|
||||
values: vec![log_value],
|
||||
});
|
||||
@@ -404,13 +402,13 @@ mod tests {
|
||||
None,
|
||||
None,
|
||||
Ok(vec![
|
||||
PipelineIngestRequest {
|
||||
LogIngestRequest {
|
||||
table: "test".to_string(),
|
||||
values: vec![
|
||||
pipeline::json_to_map(json!({"foo1": "foo1_value", "bar1": "bar1_value"})).unwrap(),
|
||||
],
|
||||
},
|
||||
PipelineIngestRequest {
|
||||
LogIngestRequest {
|
||||
table: "test".to_string(),
|
||||
values: vec![pipeline::json_to_map(json!({"foo2": "foo2_value", "bar2": "bar2_value"})).unwrap()],
|
||||
},
|
||||
@@ -427,11 +425,11 @@ mod tests {
|
||||
Some("logs".to_string()),
|
||||
None,
|
||||
Ok(vec![
|
||||
PipelineIngestRequest {
|
||||
LogIngestRequest {
|
||||
table: "test".to_string(),
|
||||
values: vec![pipeline::json_to_map(json!({"foo1": "foo1_value", "bar1": "bar1_value"})).unwrap()],
|
||||
},
|
||||
PipelineIngestRequest {
|
||||
LogIngestRequest {
|
||||
table: "logs".to_string(),
|
||||
values: vec![pipeline::json_to_map(json!({"foo2": "foo2_value", "bar2": "bar2_value"})).unwrap()],
|
||||
},
|
||||
@@ -448,11 +446,11 @@ mod tests {
|
||||
Some("logs".to_string()),
|
||||
None,
|
||||
Ok(vec![
|
||||
PipelineIngestRequest {
|
||||
LogIngestRequest {
|
||||
table: "test".to_string(),
|
||||
values: vec![pipeline::json_to_map(json!({"foo1": "foo1_value", "bar1": "bar1_value"})).unwrap()],
|
||||
},
|
||||
PipelineIngestRequest {
|
||||
LogIngestRequest {
|
||||
table: "logs".to_string(),
|
||||
values: vec![pipeline::json_to_map(json!({"foo2": "foo2_value", "bar2": "bar2_value"})).unwrap()],
|
||||
},
|
||||
@@ -468,7 +466,7 @@ mod tests {
|
||||
Some("logs".to_string()),
|
||||
None,
|
||||
Ok(vec![
|
||||
PipelineIngestRequest {
|
||||
LogIngestRequest {
|
||||
table: "test".to_string(),
|
||||
values: vec![pipeline::json_to_map(json!({"foo1": "foo1_value", "bar1": "bar1_value"})).unwrap()],
|
||||
},
|
||||
@@ -485,11 +483,11 @@ mod tests {
|
||||
None,
|
||||
Some("data".to_string()),
|
||||
Ok(vec![
|
||||
PipelineIngestRequest {
|
||||
LogIngestRequest {
|
||||
table: "test".to_string(),
|
||||
values: vec![pipeline::json_to_map(json!({"foo1": "foo1_value", "bar1": "bar1_value"})).unwrap()],
|
||||
},
|
||||
PipelineIngestRequest {
|
||||
LogIngestRequest {
|
||||
table: "test".to_string(),
|
||||
values: vec![pipeline::json_to_map(json!({"foo2": "foo2_value", "bar2": "bar2_value"})).unwrap()],
|
||||
},
|
||||
@@ -506,13 +504,13 @@ mod tests {
|
||||
None,
|
||||
Some("message".to_string()),
|
||||
Ok(vec![
|
||||
PipelineIngestRequest {
|
||||
LogIngestRequest {
|
||||
table: "logs-generic-default".to_string(),
|
||||
values: vec![
|
||||
pipeline::json_to_map(json!({"message": "172.16.0.1 - - [25/May/2024:20:19:37 +0000] \"GET /contact HTTP/1.1\" 404 162 \"-\" \"Mozilla/5.0 (iPhone; CPU iPhone OS 14_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1\""})).unwrap(),
|
||||
],
|
||||
},
|
||||
PipelineIngestRequest {
|
||||
LogIngestRequest {
|
||||
table: "logs-generic-default".to_string(),
|
||||
values: vec![
|
||||
pipeline::json_to_map(json!({"message": "10.0.0.1 - - [25/May/2024:20:18:37 +0000] \"GET /images/logo.png HTTP/1.1\" 304 0 \"-\" \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0\""})).unwrap(),
|
||||
|
||||
@@ -33,7 +33,7 @@ use datatypes::value::column_data_to_json;
|
||||
use headers::ContentType;
|
||||
use lazy_static::lazy_static;
|
||||
use pipeline::util::to_pipeline_version;
|
||||
use pipeline::{GreptimePipelineParams, PipelineContext, PipelineDefinition, PipelineMap};
|
||||
use pipeline::{GreptimePipelineParams, PipelineDefinition, PipelineMap};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{json, Deserializer, Map, Value};
|
||||
use session::context::{Channel, QueryContext, QueryContextRef};
|
||||
@@ -100,7 +100,7 @@ pub struct LogIngesterQueryParams {
|
||||
/// LogIngestRequest is the internal request for log ingestion. The raw log input can be transformed into multiple LogIngestRequests.
|
||||
/// Multiple LogIngestRequests will be ingested into the same database with the same pipeline.
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub(crate) struct PipelineIngestRequest {
|
||||
pub(crate) struct LogIngestRequest {
|
||||
/// The table where the log data will be written to.
|
||||
pub table: String,
|
||||
/// The log data to be ingested.
|
||||
@@ -325,15 +325,12 @@ async fn dryrun_pipeline_inner(
|
||||
) -> Result<Response> {
|
||||
let params = GreptimePipelineParams::default();
|
||||
|
||||
let pipeline_def = PipelineDefinition::Resolved(pipeline);
|
||||
let pipeline_ctx = PipelineContext::new(&pipeline_def, ¶ms);
|
||||
let results = run_pipeline(
|
||||
&pipeline_handler,
|
||||
&pipeline_ctx,
|
||||
PipelineIngestRequest {
|
||||
table: "dry_run".to_owned(),
|
||||
values: value,
|
||||
},
|
||||
&PipelineDefinition::Resolved(pipeline),
|
||||
¶ms,
|
||||
value,
|
||||
"dry_run".to_owned(),
|
||||
query_ctx,
|
||||
true,
|
||||
)
|
||||
@@ -606,7 +603,7 @@ pub async fn log_ingester(
|
||||
ingest_logs_inner(
|
||||
handler,
|
||||
pipeline,
|
||||
vec![PipelineIngestRequest {
|
||||
vec![LogIngestRequest {
|
||||
table: table_name,
|
||||
values: value,
|
||||
}],
|
||||
@@ -676,9 +673,9 @@ fn extract_pipeline_value_by_content_type(
|
||||
}
|
||||
|
||||
pub(crate) async fn ingest_logs_inner(
|
||||
handler: PipelineHandlerRef,
|
||||
state: PipelineHandlerRef,
|
||||
pipeline: PipelineDefinition,
|
||||
log_ingest_requests: Vec<PipelineIngestRequest>,
|
||||
log_ingest_requests: Vec<LogIngestRequest>,
|
||||
query_ctx: QueryContextRef,
|
||||
headers: HeaderMap,
|
||||
) -> Result<HttpResponse> {
|
||||
@@ -693,15 +690,22 @@ pub(crate) async fn ingest_logs_inner(
|
||||
.and_then(|v| v.to_str().ok()),
|
||||
);
|
||||
|
||||
let pipeline_ctx = PipelineContext::new(&pipeline, &pipeline_params);
|
||||
for pipeline_req in log_ingest_requests {
|
||||
let requests =
|
||||
run_pipeline(&handler, &pipeline_ctx, pipeline_req, &query_ctx, true).await?;
|
||||
for request in log_ingest_requests {
|
||||
let requests = run_pipeline(
|
||||
&state,
|
||||
&pipeline,
|
||||
&pipeline_params,
|
||||
request.values,
|
||||
request.table,
|
||||
&query_ctx,
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
|
||||
insert_requests.extend(requests);
|
||||
}
|
||||
|
||||
let output = handler
|
||||
let output = state
|
||||
.insert(
|
||||
RowInsertRequests {
|
||||
inserts: insert_requests,
|
||||
|
||||
@@ -83,17 +83,33 @@ impl Default for RemoteWriteQuery {
|
||||
)]
|
||||
pub async fn remote_write(
|
||||
State(state): State<PromStoreState>,
|
||||
query: Query<RemoteWriteQuery>,
|
||||
extension: Extension<QueryContext>,
|
||||
content_encoding: TypedHeader<headers::ContentEncoding>,
|
||||
raw_body: Bytes,
|
||||
) -> Result<impl IntoResponse> {
|
||||
remote_write_impl(
|
||||
state.prom_store_handler,
|
||||
query,
|
||||
extension,
|
||||
content_encoding,
|
||||
raw_body,
|
||||
state.is_strict_mode,
|
||||
state.prom_store_with_metric_engine,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn remote_write_impl(
|
||||
handler: PromStoreProtocolHandlerRef,
|
||||
Query(params): Query<RemoteWriteQuery>,
|
||||
Extension(mut query_ctx): Extension<QueryContext>,
|
||||
content_encoding: TypedHeader<headers::ContentEncoding>,
|
||||
body: Bytes,
|
||||
is_strict_mode: bool,
|
||||
is_metric_engine: bool,
|
||||
) -> Result<impl IntoResponse> {
|
||||
let PromStoreState {
|
||||
prom_store_handler,
|
||||
prom_store_with_metric_engine,
|
||||
is_strict_mode,
|
||||
} = state;
|
||||
|
||||
// VictoriaMetrics handshake
|
||||
if let Some(_vm_handshake) = params.get_vm_proto_version {
|
||||
return Ok(VM_PROTO_VERSION.into_response());
|
||||
}
|
||||
@@ -112,9 +128,7 @@ pub async fn remote_write(
|
||||
}
|
||||
let query_ctx = Arc::new(query_ctx);
|
||||
|
||||
let output = prom_store_handler
|
||||
.write(request, query_ctx, prom_store_with_metric_engine)
|
||||
.await?;
|
||||
let output = handler.write(request, query_ctx, is_metric_engine).await?;
|
||||
crate::metrics::PROM_STORE_REMOTE_WRITE_SAMPLES.inc_by(samples as u64);
|
||||
Ok((
|
||||
StatusCode::NO_CONTENT,
|
||||
|
||||
@@ -24,7 +24,7 @@ use jsonb::{Number as JsonbNumber, Value as JsonbValue};
|
||||
use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
|
||||
use opentelemetry_proto::tonic::common::v1::{any_value, AnyValue, InstrumentationScope, KeyValue};
|
||||
use opentelemetry_proto::tonic::logs::v1::{LogRecord, ResourceLogs, ScopeLogs};
|
||||
use pipeline::{GreptimePipelineParams, PipelineContext, PipelineWay, SchemaInfo, SelectInfo};
|
||||
use pipeline::{GreptimePipelineParams, PipelineWay, SchemaInfo, SelectInfo};
|
||||
use serde_json::{Map, Value};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, ResultExt};
|
||||
@@ -33,7 +33,6 @@ use crate::error::{
|
||||
IncompatibleSchemaSnafu, NotSupportedSnafu, PipelineSnafu, Result,
|
||||
UnsupportedJsonDataTypeForTagSnafu,
|
||||
};
|
||||
use crate::http::event::PipelineIngestRequest;
|
||||
use crate::otlp::trace::attributes::OtlpAnyValue;
|
||||
use crate::otlp::utils::{bytes_to_hex_string, key_value_to_jsonb};
|
||||
use crate::pipeline::run_pipeline;
|
||||
@@ -75,14 +74,12 @@ pub async fn to_grpc_insert_requests(
|
||||
let data = parse_export_logs_service_request(request);
|
||||
let array = pipeline::json_array_to_map(data).context(PipelineSnafu)?;
|
||||
|
||||
let pipeline_ctx = PipelineContext::new(&pipeline_def, &pipeline_params);
|
||||
let inserts = run_pipeline(
|
||||
&pipeline_handler,
|
||||
&pipeline_ctx,
|
||||
PipelineIngestRequest {
|
||||
table: table_name,
|
||||
values: array,
|
||||
},
|
||||
&pipeline_def,
|
||||
&pipeline_params,
|
||||
array,
|
||||
table_name,
|
||||
query_ctx,
|
||||
true,
|
||||
)
|
||||
|
||||
@@ -18,14 +18,13 @@ use std::sync::Arc;
|
||||
use api::v1::{RowInsertRequest, Rows};
|
||||
use hashbrown::HashMap;
|
||||
use pipeline::{
|
||||
DispatchedTo, GreptimePipelineParams, IdentityTimeIndex, Pipeline, PipelineContext,
|
||||
PipelineDefinition, PipelineExecOutput, PipelineMap, GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME,
|
||||
DispatchedTo, GreptimePipelineParams, IdentityTimeIndex, Pipeline, PipelineDefinition,
|
||||
PipelineExecOutput, PipelineMap, GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME,
|
||||
};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{CatalogSnafu, PipelineSnafu, Result};
|
||||
use crate::http::event::PipelineIngestRequest;
|
||||
use crate::metrics::{
|
||||
METRIC_FAILURE_VALUE, METRIC_HTTP_LOGS_TRANSFORM_ELAPSED, METRIC_SUCCESS_VALUE,
|
||||
};
|
||||
@@ -52,24 +51,36 @@ pub async fn get_pipeline(
|
||||
|
||||
pub(crate) async fn run_pipeline(
|
||||
handler: &PipelineHandlerRef,
|
||||
pipeline_ctx: &PipelineContext<'_>,
|
||||
pipeline_req: PipelineIngestRequest,
|
||||
pipeline_definition: &PipelineDefinition,
|
||||
pipeline_parameters: &GreptimePipelineParams,
|
||||
data_array: Vec<PipelineMap>,
|
||||
table_name: String,
|
||||
query_ctx: &QueryContextRef,
|
||||
is_top_level: bool,
|
||||
) -> Result<Vec<RowInsertRequest>> {
|
||||
match &pipeline_ctx.pipeline_definition {
|
||||
match pipeline_definition {
|
||||
PipelineDefinition::GreptimeIdentityPipeline(custom_ts) => {
|
||||
run_identity_pipeline(
|
||||
handler,
|
||||
custom_ts.as_ref(),
|
||||
pipeline_ctx.pipeline_param,
|
||||
pipeline_req,
|
||||
pipeline_parameters,
|
||||
data_array,
|
||||
table_name,
|
||||
query_ctx,
|
||||
)
|
||||
.await
|
||||
}
|
||||
_ => {
|
||||
run_custom_pipeline(handler, pipeline_ctx, pipeline_req, query_ctx, is_top_level).await
|
||||
run_custom_pipeline(
|
||||
handler,
|
||||
pipeline_definition,
|
||||
pipeline_parameters,
|
||||
data_array,
|
||||
table_name,
|
||||
query_ctx,
|
||||
is_top_level,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -78,13 +89,10 @@ async fn run_identity_pipeline(
|
||||
handler: &PipelineHandlerRef,
|
||||
custom_ts: Option<&IdentityTimeIndex>,
|
||||
pipeline_parameters: &GreptimePipelineParams,
|
||||
pipeline_req: PipelineIngestRequest,
|
||||
data_array: Vec<PipelineMap>,
|
||||
table_name: String,
|
||||
query_ctx: &QueryContextRef,
|
||||
) -> Result<Vec<RowInsertRequest>> {
|
||||
let PipelineIngestRequest {
|
||||
table: table_name,
|
||||
values: data_array,
|
||||
} = pipeline_req;
|
||||
let table = handler
|
||||
.get_table(&table_name, query_ctx)
|
||||
.await
|
||||
@@ -101,20 +109,18 @@ async fn run_identity_pipeline(
|
||||
|
||||
async fn run_custom_pipeline(
|
||||
handler: &PipelineHandlerRef,
|
||||
pipeline_ctx: &PipelineContext<'_>,
|
||||
pipeline_req: PipelineIngestRequest,
|
||||
pipeline_definition: &PipelineDefinition,
|
||||
pipeline_parameters: &GreptimePipelineParams,
|
||||
data_array: Vec<PipelineMap>,
|
||||
table_name: String,
|
||||
query_ctx: &QueryContextRef,
|
||||
is_top_level: bool,
|
||||
) -> Result<Vec<RowInsertRequest>> {
|
||||
let db = query_ctx.get_db_string();
|
||||
let pipeline = get_pipeline(pipeline_ctx.pipeline_definition, handler, query_ctx).await?;
|
||||
let pipeline = get_pipeline(pipeline_definition, handler, query_ctx).await?;
|
||||
|
||||
let transform_timer = std::time::Instant::now();
|
||||
|
||||
let PipelineIngestRequest {
|
||||
table: table_name,
|
||||
values: data_array,
|
||||
} = pipeline_req;
|
||||
let arr_len = data_array.len();
|
||||
let mut req_map = HashMap::new();
|
||||
let mut dispatched: BTreeMap<DispatchedTo, Vec<PipelineMap>> = BTreeMap::new();
|
||||
@@ -179,15 +185,12 @@ async fn run_custom_pipeline(
|
||||
// run pipeline recursively.
|
||||
let next_pipeline_def =
|
||||
PipelineDefinition::from_name(next_pipeline_name, None, None).context(PipelineSnafu)?;
|
||||
let next_pipeline_ctx =
|
||||
PipelineContext::new(&next_pipeline_def, pipeline_ctx.pipeline_param);
|
||||
let requests = Box::pin(run_pipeline(
|
||||
handler,
|
||||
&next_pipeline_ctx,
|
||||
PipelineIngestRequest {
|
||||
table: table_name,
|
||||
values: coll,
|
||||
},
|
||||
&next_pipeline_def,
|
||||
pipeline_parameters,
|
||||
coll,
|
||||
table_name,
|
||||
query_ctx,
|
||||
false,
|
||||
))
|
||||
|
||||
@@ -258,9 +258,9 @@ impl GreptimeDbStandaloneBuilder {
|
||||
.unwrap()
|
||||
.replace(weak_grpc_handler);
|
||||
|
||||
let flow_streaming_engine = flownode.flow_engine().streaming_engine();
|
||||
let flow_worker_manager = flownode.flow_engine().streaming_engine();
|
||||
let invoker = flow::FrontendInvoker::build_from(
|
||||
flow_streaming_engine.clone(),
|
||||
flow_worker_manager.clone(),
|
||||
catalog_manager.clone(),
|
||||
kv_backend.clone(),
|
||||
cache_registry.clone(),
|
||||
@@ -271,7 +271,7 @@ impl GreptimeDbStandaloneBuilder {
|
||||
.context(StartFlownodeSnafu)
|
||||
.unwrap();
|
||||
|
||||
flow_streaming_engine.set_frontend_invoker(invoker).await;
|
||||
flow_worker_manager.set_frontend_invoker(invoker).await;
|
||||
|
||||
procedure_manager.start().await.unwrap();
|
||||
wal_options_allocator.start().await.unwrap();
|
||||
|
||||
@@ -658,117 +658,3 @@ DROP TABLE out_basic;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
-- check if different schema is working as expected
|
||||
CREATE DATABASE jsdp_log;
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
USE jsdp_log;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `api_log` (
|
||||
`time` TIMESTAMP(9) NOT NULL,
|
||||
`key` STRING NULL SKIPPING INDEX WITH(granularity = '1024', type = 'BLOOM'),
|
||||
`status_code` TINYINT NULL,
|
||||
`method` STRING NULL,
|
||||
`path` STRING NULL,
|
||||
`raw_query` STRING NULL,
|
||||
`user_agent` STRING NULL,
|
||||
`client_ip` STRING NULL,
|
||||
`duration` INT NULL,
|
||||
`count` INT NULL,
|
||||
TIME INDEX (`time`)
|
||||
) ENGINE=mito WITH(
|
||||
append_mode = 'true'
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `api_stats` (
|
||||
`time` TIMESTAMP(0) NOT NULL,
|
||||
`key` STRING NULL,
|
||||
`qpm` BIGINT NULL,
|
||||
`rpm` BIGINT NULL,
|
||||
`update_at` TIMESTAMP(3) NULL,
|
||||
TIME INDEX (`time`),
|
||||
PRIMARY KEY (`key`)
|
||||
) ENGINE=mito WITH(
|
||||
append_mode = 'false',
|
||||
merge_mode = 'last_row'
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE FLOW IF NOT EXISTS api_stats_flow
|
||||
SINK TO api_stats EXPIRE AFTER '10 minute'::INTERVAL AS
|
||||
SELECT date_trunc('minute', `time`::TimestampSecond) AS `time1`, `key`, count(*), sum(`count`)
|
||||
FROM api_log
|
||||
GROUP BY `time1`, `key`;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO `api_log` (`time`, `key`, `status_code`, `method`, `path`, `raw_query`, `user_agent`, `client_ip`, `duration`, `count`) VALUES (now(), '1', 0, 'GET', '/lightning/v1/query', 'key=1&since=600', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36', '1', 21, 1);
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
|
||||
ADMIN FLUSH_FLOW('api_stats_flow');
|
||||
|
||||
+------------------------------------+
|
||||
| ADMIN FLUSH_FLOW('api_stats_flow') |
|
||||
+------------------------------------+
|
||||
| FLOW_FLUSHED |
|
||||
+------------------------------------+
|
||||
|
||||
SELECT key FROM api_stats;
|
||||
|
||||
+-----+
|
||||
| key |
|
||||
+-----+
|
||||
| 1 |
|
||||
+-----+
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
INSERT INTO `api_log` (`time`, `key`, `status_code`, `method`, `path`, `raw_query`, `user_agent`, `client_ip`, `duration`, `count`) VALUES (now(), '2', 0, 'GET', '/lightning/v1/query', 'key=1&since=600', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36', '1', 21, 1);
|
||||
|
||||
Affected Rows: 1
|
||||
|
||||
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
|
||||
ADMIN FLUSH_FLOW('api_stats_flow');
|
||||
|
||||
+------------------------------------+
|
||||
| ADMIN FLUSH_FLOW('api_stats_flow') |
|
||||
+------------------------------------+
|
||||
| FLOW_FLUSHED |
|
||||
+------------------------------------+
|
||||
|
||||
SELECT key FROM api_stats;
|
||||
|
||||
+-----+
|
||||
| key |
|
||||
+-----+
|
||||
| 1 |
|
||||
| 2 |
|
||||
+-----+
|
||||
|
||||
DROP FLOW api_stats_flow;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
DROP TABLE api_log;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
DROP TABLE api_stats;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
USE public;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
DROP DATABASE jsdp_log;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
|
||||
@@ -350,66 +350,3 @@ DROP FLOW test_wildcard_basic;
|
||||
DROP TABLE input_basic;
|
||||
|
||||
DROP TABLE out_basic;
|
||||
|
||||
-- check if different schema is working as expected
|
||||
|
||||
CREATE DATABASE jsdp_log;
|
||||
USE jsdp_log;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `api_log` (
|
||||
`time` TIMESTAMP(9) NOT NULL,
|
||||
`key` STRING NULL SKIPPING INDEX WITH(granularity = '1024', type = 'BLOOM'),
|
||||
`status_code` TINYINT NULL,
|
||||
`method` STRING NULL,
|
||||
`path` STRING NULL,
|
||||
`raw_query` STRING NULL,
|
||||
`user_agent` STRING NULL,
|
||||
`client_ip` STRING NULL,
|
||||
`duration` INT NULL,
|
||||
`count` INT NULL,
|
||||
TIME INDEX (`time`)
|
||||
) ENGINE=mito WITH(
|
||||
append_mode = 'true'
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `api_stats` (
|
||||
`time` TIMESTAMP(0) NOT NULL,
|
||||
`key` STRING NULL,
|
||||
`qpm` BIGINT NULL,
|
||||
`rpm` BIGINT NULL,
|
||||
`update_at` TIMESTAMP(3) NULL,
|
||||
TIME INDEX (`time`),
|
||||
PRIMARY KEY (`key`)
|
||||
) ENGINE=mito WITH(
|
||||
append_mode = 'false',
|
||||
merge_mode = 'last_row'
|
||||
);
|
||||
|
||||
CREATE FLOW IF NOT EXISTS api_stats_flow
|
||||
SINK TO api_stats EXPIRE AFTER '10 minute'::INTERVAL AS
|
||||
SELECT date_trunc('minute', `time`::TimestampSecond) AS `time1`, `key`, count(*), sum(`count`)
|
||||
FROM api_log
|
||||
GROUP BY `time1`, `key`;
|
||||
|
||||
INSERT INTO `api_log` (`time`, `key`, `status_code`, `method`, `path`, `raw_query`, `user_agent`, `client_ip`, `duration`, `count`) VALUES (now(), '1', 0, 'GET', '/lightning/v1/query', 'key=1&since=600', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36', '1', 21, 1);
|
||||
|
||||
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
|
||||
ADMIN FLUSH_FLOW('api_stats_flow');
|
||||
|
||||
SELECT key FROM api_stats;
|
||||
|
||||
-- SQLNESS ARG restart=true
|
||||
INSERT INTO `api_log` (`time`, `key`, `status_code`, `method`, `path`, `raw_query`, `user_agent`, `client_ip`, `duration`, `count`) VALUES (now(), '2', 0, 'GET', '/lightning/v1/query', 'key=1&since=600', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36', '1', 21, 1);
|
||||
|
||||
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
|
||||
ADMIN FLUSH_FLOW('api_stats_flow');
|
||||
|
||||
SELECT key FROM api_stats;
|
||||
|
||||
DROP FLOW api_stats_flow;
|
||||
|
||||
DROP TABLE api_log;
|
||||
DROP TABLE api_stats;
|
||||
|
||||
USE public;
|
||||
DROP DATABASE jsdp_log;
|
||||
|
||||
@@ -1,266 +0,0 @@
|
||||
CREATE TABLE access_log (
|
||||
"url" STRING,
|
||||
user_id BIGINT,
|
||||
ts TIMESTAMP TIME INDEX,
|
||||
PRIMARY KEY ("url", user_id)
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE TABLE access_log_10s (
|
||||
"url" STRING,
|
||||
time_window timestamp time INDEX,
|
||||
state BINARY,
|
||||
PRIMARY KEY ("url")
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE FLOW calc_access_log_10s SINK TO access_log_10s
|
||||
AS
|
||||
SELECT
|
||||
"url",
|
||||
date_bin('10s'::INTERVAL, ts) AS time_window,
|
||||
hll(user_id) AS state
|
||||
FROM
|
||||
access_log
|
||||
GROUP BY
|
||||
"url",
|
||||
time_window;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
-- insert 4 rows of data
|
||||
INSERT INTO access_log VALUES
|
||||
("/dashboard", 1, "2025-03-04 00:00:00"),
|
||||
("/dashboard", 1, "2025-03-04 00:00:01"),
|
||||
("/dashboard", 2, "2025-03-04 00:00:05"),
|
||||
("/not_found", 3, "2025-03-04 00:00:11"),
|
||||
("/dashboard", 4, "2025-03-04 00:00:15");
|
||||
|
||||
Affected Rows: 5
|
||||
|
||||
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
|
||||
ADMIN FLUSH_FLOW('calc_access_log_10s');
|
||||
|
||||
+-----------------------------------------+
|
||||
| ADMIN FLUSH_FLOW('calc_access_log_10s') |
|
||||
+-----------------------------------------+
|
||||
| FLOW_FLUSHED |
|
||||
+-----------------------------------------+
|
||||
|
||||
-- query should return 3 rows
|
||||
SELECT "url", time_window FROM access_log_10s
|
||||
ORDER BY
|
||||
time_window;
|
||||
|
||||
+------------+---------------------+
|
||||
| url | time_window |
|
||||
+------------+---------------------+
|
||||
| /dashboard | 2025-03-04T00:00:00 |
|
||||
| /dashboard | 2025-03-04T00:00:10 |
|
||||
| /not_found | 2025-03-04T00:00:10 |
|
||||
+------------+---------------------+
|
||||
|
||||
-- use hll_count to query the approximate data in access_log_10s
|
||||
SELECT "url", time_window, hll_count(state) FROM access_log_10s
|
||||
ORDER BY
|
||||
time_window;
|
||||
|
||||
+------------+---------------------+---------------------------------+
|
||||
| url | time_window | hll_count(access_log_10s.state) |
|
||||
+------------+---------------------+---------------------------------+
|
||||
| /dashboard | 2025-03-04T00:00:00 | 2 |
|
||||
| /dashboard | 2025-03-04T00:00:10 | 1 |
|
||||
| /not_found | 2025-03-04T00:00:10 | 1 |
|
||||
+------------+---------------------+---------------------------------+
|
||||
|
||||
-- further, we can aggregate 10 seconds of data to every minute, by using hll_merge to merge 10 seconds of hyperloglog state
|
||||
SELECT
|
||||
"url",
|
||||
date_bin('1 minute'::INTERVAL, time_window) AS time_window_1m,
|
||||
hll_count(hll_merge(state)) as uv_per_min
|
||||
FROM
|
||||
access_log_10s
|
||||
GROUP BY
|
||||
"url",
|
||||
time_window_1m
|
||||
ORDER BY
|
||||
time_window_1m;
|
||||
|
||||
+------------+---------------------+------------+
|
||||
| url | time_window_1m | uv_per_min |
|
||||
+------------+---------------------+------------+
|
||||
| /not_found | 2025-03-04T00:00:00 | 1 |
|
||||
| /dashboard | 2025-03-04T00:00:00 | 3 |
|
||||
+------------+---------------------+------------+
|
||||
|
||||
DROP FLOW calc_access_log_10s;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
DROP TABLE access_log_10s;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
DROP TABLE access_log;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE TABLE percentile_base (
|
||||
"id" INT PRIMARY KEY,
|
||||
"value" DOUBLE,
|
||||
ts timestamp(0) time index
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE TABLE percentile_5s (
|
||||
"percentile_state" BINARY,
|
||||
time_window timestamp(0) time index
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE FLOW calc_percentile_5s SINK TO percentile_5s
|
||||
AS
|
||||
SELECT
|
||||
uddsketch_state(128, 0.01, "value") AS "value",
|
||||
date_bin('5 seconds'::INTERVAL, ts) AS time_window
|
||||
FROM
|
||||
percentile_base
|
||||
WHERE
|
||||
"value" > 0 AND "value" < 70
|
||||
GROUP BY
|
||||
time_window;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO percentile_base ("id", "value", ts) VALUES
|
||||
(1, 10.0, 1),
|
||||
(2, 20.0, 2),
|
||||
(3, 30.0, 3),
|
||||
(4, 40.0, 4),
|
||||
(5, 50.0, 5),
|
||||
(6, 60.0, 6),
|
||||
(7, 70.0, 7),
|
||||
(8, 80.0, 8),
|
||||
(9, 90.0, 9),
|
||||
(10, 100.0, 10);
|
||||
|
||||
Affected Rows: 10
|
||||
|
||||
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
|
||||
ADMIN FLUSH_FLOW('calc_percentile_5s');
|
||||
|
||||
+----------------------------------------+
|
||||
| ADMIN FLUSH_FLOW('calc_percentile_5s') |
|
||||
+----------------------------------------+
|
||||
| FLOW_FLUSHED |
|
||||
+----------------------------------------+
|
||||
|
||||
SELECT
|
||||
time_window,
|
||||
uddsketch_calc(0.99, `percentile_state`) AS p99
|
||||
FROM
|
||||
percentile_5s
|
||||
ORDER BY
|
||||
time_window;
|
||||
|
||||
+---------------------+--------------------+
|
||||
| time_window | p99 |
|
||||
+---------------------+--------------------+
|
||||
| 1970-01-01T00:00:00 | 40.04777053326359 |
|
||||
| 1970-01-01T00:00:05 | 59.745049810145126 |
|
||||
+---------------------+--------------------+
|
||||
|
||||
DROP FLOW calc_percentile_5s;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
DROP TABLE percentile_5s;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
DROP TABLE percentile_base;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE TABLE percentile_base (
|
||||
"id" INT PRIMARY KEY,
|
||||
"value" DOUBLE,
|
||||
ts timestamp(0) time index
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE TABLE percentile_5s (
|
||||
"percentile_state" BINARY,
|
||||
time_window timestamp(0) time index
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
CREATE FLOW calc_percentile_5s SINK TO percentile_5s
|
||||
AS
|
||||
SELECT
|
||||
uddsketch_state(128, 0.01, CASE WHEN "value" > 0 AND "value" < 70 THEN "value" ELSE NULL END) AS "value",
|
||||
date_bin('5 seconds'::INTERVAL, ts) AS time_window
|
||||
FROM
|
||||
percentile_base
|
||||
GROUP BY
|
||||
time_window;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO percentile_base ("id", "value", ts) VALUES
|
||||
(1, 10.0, 1),
|
||||
(2, 20.0, 2),
|
||||
(3, 30.0, 3),
|
||||
(4, 40.0, 4),
|
||||
(5, 50.0, 5),
|
||||
(6, 60.0, 6),
|
||||
(7, 70.0, 7),
|
||||
(8, 80.0, 8),
|
||||
(9, 90.0, 9),
|
||||
(10, 100.0, 10);
|
||||
|
||||
Affected Rows: 10
|
||||
|
||||
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
|
||||
ADMIN FLUSH_FLOW('calc_percentile_5s');
|
||||
|
||||
+----------------------------------------+
|
||||
| ADMIN FLUSH_FLOW('calc_percentile_5s') |
|
||||
+----------------------------------------+
|
||||
| FLOW_FLUSHED |
|
||||
+----------------------------------------+
|
||||
|
||||
SELECT
|
||||
time_window,
|
||||
uddsketch_calc(0.99, percentile_state) AS p99
|
||||
FROM
|
||||
percentile_5s
|
||||
ORDER BY
|
||||
time_window;
|
||||
|
||||
+---------------------+--------------------+
|
||||
| time_window | p99 |
|
||||
+---------------------+--------------------+
|
||||
| 1970-01-01T00:00:00 | 40.04777053326359 |
|
||||
| 1970-01-01T00:00:05 | 59.745049810145126 |
|
||||
| 1970-01-01T00:00:10 | |
|
||||
+---------------------+--------------------+
|
||||
|
||||
DROP FLOW calc_percentile_5s;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
DROP TABLE percentile_5s;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
DROP TABLE percentile_base;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
@@ -1,161 +0,0 @@
|
||||
CREATE TABLE access_log (
|
||||
"url" STRING,
|
||||
user_id BIGINT,
|
||||
ts TIMESTAMP TIME INDEX,
|
||||
PRIMARY KEY ("url", user_id)
|
||||
);
|
||||
|
||||
CREATE TABLE access_log_10s (
|
||||
"url" STRING,
|
||||
time_window timestamp time INDEX,
|
||||
state BINARY,
|
||||
PRIMARY KEY ("url")
|
||||
);
|
||||
|
||||
CREATE FLOW calc_access_log_10s SINK TO access_log_10s
|
||||
AS
|
||||
SELECT
|
||||
"url",
|
||||
date_bin('10s'::INTERVAL, ts) AS time_window,
|
||||
hll(user_id) AS state
|
||||
FROM
|
||||
access_log
|
||||
GROUP BY
|
||||
"url",
|
||||
time_window;
|
||||
|
||||
-- insert 4 rows of data
|
||||
INSERT INTO access_log VALUES
|
||||
("/dashboard", 1, "2025-03-04 00:00:00"),
|
||||
("/dashboard", 1, "2025-03-04 00:00:01"),
|
||||
("/dashboard", 2, "2025-03-04 00:00:05"),
|
||||
("/not_found", 3, "2025-03-04 00:00:11"),
|
||||
("/dashboard", 4, "2025-03-04 00:00:15");
|
||||
|
||||
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
|
||||
ADMIN FLUSH_FLOW('calc_access_log_10s');
|
||||
|
||||
-- query should return 3 rows
|
||||
SELECT "url", time_window FROM access_log_10s
|
||||
ORDER BY
|
||||
time_window;
|
||||
|
||||
-- use hll_count to query the approximate data in access_log_10s
|
||||
SELECT "url", time_window, hll_count(state) FROM access_log_10s
|
||||
ORDER BY
|
||||
time_window;
|
||||
|
||||
-- further, we can aggregate 10 seconds of data to every minute, by using hll_merge to merge 10 seconds of hyperloglog state
|
||||
SELECT
|
||||
"url",
|
||||
date_bin('1 minute'::INTERVAL, time_window) AS time_window_1m,
|
||||
hll_count(hll_merge(state)) as uv_per_min
|
||||
FROM
|
||||
access_log_10s
|
||||
GROUP BY
|
||||
"url",
|
||||
time_window_1m
|
||||
ORDER BY
|
||||
time_window_1m;
|
||||
|
||||
DROP FLOW calc_access_log_10s;
|
||||
DROP TABLE access_log_10s;
|
||||
DROP TABLE access_log;
|
||||
|
CREATE TABLE percentile_base (
    "id" INT PRIMARY KEY,
    "value" DOUBLE,
    ts timestamp(0) time index
);

CREATE TABLE percentile_5s (
    "percentile_state" BINARY,
    time_window timestamp(0) time index
);

CREATE FLOW calc_percentile_5s SINK TO percentile_5s
AS
SELECT
    uddsketch_state(128, 0.01, "value") AS "value",
    date_bin('5 seconds'::INTERVAL, ts) AS time_window
FROM
    percentile_base
WHERE
    "value" > 0 AND "value" < 70
GROUP BY
    time_window;

INSERT INTO percentile_base ("id", "value", ts) VALUES
    (1, 10.0, 1),
    (2, 20.0, 2),
    (3, 30.0, 3),
    (4, 40.0, 4),
    (5, 50.0, 5),
    (6, 60.0, 6),
    (7, 70.0, 7),
    (8, 80.0, 8),
    (9, 90.0, 9),
    (10, 100.0, 10);

-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('calc_percentile_5s');

SELECT
    time_window,
    uddsketch_calc(0.99, `percentile_state`) AS p99
FROM
    percentile_5s
ORDER BY
    time_window;

DROP FLOW calc_percentile_5s;
DROP TABLE percentile_5s;
DROP TABLE percentile_base;

CREATE TABLE percentile_base (
    "id" INT PRIMARY KEY,
    "value" DOUBLE,
    ts timestamp(0) time index
);

CREATE TABLE percentile_5s (
    "percentile_state" BINARY,
    time_window timestamp(0) time index
);

CREATE FLOW calc_percentile_5s SINK TO percentile_5s
AS
SELECT
    uddsketch_state(128, 0.01, CASE WHEN "value" > 0 AND "value" < 70 THEN "value" ELSE NULL END) AS "value",
    date_bin('5 seconds'::INTERVAL, ts) AS time_window
FROM
    percentile_base
GROUP BY
    time_window;

INSERT INTO percentile_base ("id", "value", ts) VALUES
    (1, 10.0, 1),
    (2, 20.0, 2),
    (3, 30.0, 3),
    (4, 40.0, 4),
    (5, 50.0, 5),
    (6, 60.0, 6),
    (7, 70.0, 7),
    (8, 80.0, 8),
    (9, 90.0, 9),
    (10, 100.0, 10);

-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('calc_percentile_5s');

SELECT
    time_window,
    uddsketch_calc(0.99, percentile_state) AS p99
FROM
    percentile_5s
ORDER BY
    time_window;

DROP FLOW calc_percentile_5s;
DROP TABLE percentile_5s;
DROP TABLE percentile_base;
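
The two deleted flow variants above differ only in where the value filter lives: the first discards out-of-range rows with WHERE before aggregation, while the second keeps every row and nulls out-of-range values inside uddsketch_state via CASE WHEN, so a 5-second window can still emit a sink row even when all of its values are filtered out. The same filtered p99 can also be computed in one ad-hoc statement without a flow; a sketch, assuming the scalar uddsketch_calc can be applied directly to the aggregated state:

SELECT
    date_bin('5 seconds'::INTERVAL, ts) AS time_window,
    uddsketch_calc(0.99, uddsketch_state(128, 0.01,
        CASE WHEN "value" > 0 AND "value" < 70 THEN "value" ELSE NULL END)) AS p99
FROM
    percentile_base
GROUP BY
    time_window
ORDER BY
    time_window;
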
@@ -30,40 +30,40 @@ Affected Rows: 16

TQL EVAL (0, 15, '5s') quantile(0.5, test);

+---------------------+---------------------------------+
| ts | quantile(Float64(0.5),test.val) |
+---------------------+---------------------------------+
| 1970-01-01T00:00:00 | 2.5 |
| 1970-01-01T00:00:05 | 6.5 |
| 1970-01-01T00:00:10 | 10.5 |
| 1970-01-01T00:00:15 | 14.5 |
+---------------------+---------------------------------+
+---------------------+--------------------+
| ts | quantile(test.val) |
+---------------------+--------------------+
| 1970-01-01T00:00:00 | 2.5 |
| 1970-01-01T00:00:05 | 6.5 |
| 1970-01-01T00:00:10 | 10.5 |
| 1970-01-01T00:00:15 | 14.5 |
+---------------------+--------------------+

TQL EVAL (0, 15, '5s') quantile(0.5, test) by (idc);

+------+---------------------+---------------------------------+
| idc | ts | quantile(Float64(0.5),test.val) |
+------+---------------------+---------------------------------+
| idc1 | 1970-01-01T00:00:00 | 1.5 |
| idc1 | 1970-01-01T00:00:05 | 5.5 |
| idc1 | 1970-01-01T00:00:10 | 9.5 |
| idc1 | 1970-01-01T00:00:15 | 13.5 |
| idc2 | 1970-01-01T00:00:00 | 3.5 |
| idc2 | 1970-01-01T00:00:05 | 7.5 |
| idc2 | 1970-01-01T00:00:10 | 11.5 |
| idc2 | 1970-01-01T00:00:15 | 15.5 |
+------+---------------------+---------------------------------+
+------+---------------------+--------------------+
| idc | ts | quantile(test.val) |
+------+---------------------+--------------------+
| idc1 | 1970-01-01T00:00:00 | 1.5 |
| idc1 | 1970-01-01T00:00:05 | 5.5 |
| idc1 | 1970-01-01T00:00:10 | 9.5 |
| idc1 | 1970-01-01T00:00:15 | 13.5 |
| idc2 | 1970-01-01T00:00:00 | 3.5 |
| idc2 | 1970-01-01T00:00:05 | 7.5 |
| idc2 | 1970-01-01T00:00:10 | 11.5 |
| idc2 | 1970-01-01T00:00:15 | 15.5 |
+------+---------------------+--------------------+

TQL EVAL (0, 15, '5s') quantile(0.5, sum(test) by (idc));

+---------------------+--------------------------------------+
| ts | quantile(Float64(0.5),sum(test.val)) |
+---------------------+--------------------------------------+
| 1970-01-01T00:00:00 | 5.0 |
| 1970-01-01T00:00:05 | 13.0 |
| 1970-01-01T00:00:10 | 21.0 |
| 1970-01-01T00:00:15 | 29.0 |
+---------------------+--------------------------------------+
+---------------------+-------------------------+
| ts | quantile(sum(test.val)) |
+---------------------+-------------------------+
| 1970-01-01T00:00:00 | 5.0 |
| 1970-01-01T00:00:05 | 13.0 |
| 1970-01-01T00:00:10 | 21.0 |
| 1970-01-01T00:00:15 | 29.0 |
+---------------------+-------------------------+

DROP TABLE test;

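The quantile operator in this hunk takes a φ parameter between 0 and 1 and composes with inner aggregations; an illustrative variation against the same test table (result omitted):

TQL EVAL (0, 15, '5s') quantile(0.9, sum(test) by (idc));
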
@@ -18,62 +18,62 @@ Affected Rows: 4
-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 0.01);

+---------------------+------------------------------------------+-------+
| ts | prom_round(greptime_value,Float64(0.01)) | job |
+---------------------+------------------------------------------+-------+
| 1970-01-01T00:00:03 | 123.45 | read |
| 1970-01-01T00:00:03 | 234.57 | write |
| 1970-01-01T00:00:04 | 345.68 | read |
| 1970-01-01T00:00:04 | 456.79 | write |
+---------------------+------------------------------------------+-------+
+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 123.45 | read |
| 1970-01-01T00:00:03 | 234.57 | write |
| 1970-01-01T00:00:04 | 345.68 | read |
| 1970-01-01T00:00:04 | 456.79 | write |
+---------------------+----------------------------+-------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 0.1);

+---------------------+-----------------------------------------+-------+
| ts | prom_round(greptime_value,Float64(0.1)) | job |
+---------------------+-----------------------------------------+-------+
| 1970-01-01T00:00:03 | 123.5 | read |
| 1970-01-01T00:00:03 | 234.60000000000002 | write |
| 1970-01-01T00:00:04 | 345.70000000000005 | read |
| 1970-01-01T00:00:04 | 456.8 | write |
+---------------------+-----------------------------------------+-------+
+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 123.5 | read |
| 1970-01-01T00:00:03 | 234.60000000000002 | write |
| 1970-01-01T00:00:04 | 345.70000000000005 | read |
| 1970-01-01T00:00:04 | 456.8 | write |
+---------------------+----------------------------+-------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 1.0);

+---------------------+---------------------------------------+-------+
| ts | prom_round(greptime_value,Float64(1)) | job |
+---------------------+---------------------------------------+-------+
| 1970-01-01T00:00:03 | 123.0 | read |
| 1970-01-01T00:00:03 | 235.0 | write |
| 1970-01-01T00:00:04 | 346.0 | read |
| 1970-01-01T00:00:04 | 457.0 | write |
+---------------------+---------------------------------------+-------+
+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 123.0 | read |
| 1970-01-01T00:00:03 | 235.0 | write |
| 1970-01-01T00:00:04 | 346.0 | read |
| 1970-01-01T00:00:04 | 457.0 | write |
+---------------------+----------------------------+-------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit);

+---------------------+---------------------------------------+-------+
| ts | prom_round(greptime_value,Float64(0)) | job |
+---------------------+---------------------------------------+-------+
| 1970-01-01T00:00:03 | 123.0 | read |
| 1970-01-01T00:00:03 | 235.0 | write |
| 1970-01-01T00:00:04 | 346.0 | read |
| 1970-01-01T00:00:04 | 457.0 | write |
+---------------------+---------------------------------------+-------+
+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 123.0 | read |
| 1970-01-01T00:00:03 | 235.0 | write |
| 1970-01-01T00:00:04 | 346.0 | read |
| 1970-01-01T00:00:04 | 457.0 | write |
+---------------------+----------------------------+-------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 10.0);

+---------------------+----------------------------------------+-------+
| ts | prom_round(greptime_value,Float64(10)) | job |
+---------------------+----------------------------------------+-------+
| 1970-01-01T00:00:03 | 120.0 | read |
| 1970-01-01T00:00:03 | 230.0 | write |
| 1970-01-01T00:00:04 | 350.0 | read |
| 1970-01-01T00:00:04 | 460.0 | write |
+---------------------+----------------------------------------+-------+
+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 120.0 | read |
| 1970-01-01T00:00:03 | 230.0 | write |
| 1970-01-01T00:00:04 | 350.0 | read |
| 1970-01-01T00:00:04 | 460.0 | write |
+---------------------+----------------------------+-------+

drop table cache_hit;

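As the hunk above shows, round accepts an optional to-nearest argument; an illustrative variation against the same cache_hit table (result omitted):

tql eval (3, 4, '1s') round(cache_hit, 100.0);
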
@@ -228,27 +228,27 @@ tql eval (420, 420, '1s') histogram_quantile(0.833, histogram2_bucket);

tql eval (2820, 2820, '1s') histogram_quantile(0.166, rate(histogram2_bucket[15m]));

+---------------------+------------------------------------------+
| ts | prom_rate(ts_range,val,ts,Int64(900000)) |
+---------------------+------------------------------------------+
| 1970-01-01T00:47:00 | 0.996 |
+---------------------+------------------------------------------+
+---------------------+----------------------------+
| ts | prom_rate(ts_range,val,ts) |
+---------------------+----------------------------+
| 1970-01-01T00:47:00 | 0.996 |
+---------------------+----------------------------+

tql eval (2820, 2820, '1s') histogram_quantile(0.5, rate(histogram2_bucket[15m]));

+---------------------+------------------------------------------+
| ts | prom_rate(ts_range,val,ts,Int64(900000)) |
+---------------------+------------------------------------------+
| 1970-01-01T00:47:00 | 3.0 |
+---------------------+------------------------------------------+
+---------------------+----------------------------+
| ts | prom_rate(ts_range,val,ts) |
+---------------------+----------------------------+
| 1970-01-01T00:47:00 | 3.0 |
+---------------------+----------------------------+

tql eval (2820, 2820, '1s') histogram_quantile(0.833, rate(histogram2_bucket[15m]));

+---------------------+------------------------------------------+
| ts | prom_rate(ts_range,val,ts,Int64(900000)) |
+---------------------+------------------------------------------+
| 1970-01-01T00:47:00 | 4.998 |
+---------------------+------------------------------------------+
+---------------------+----------------------------+
| ts | prom_rate(ts_range,val,ts) |
+---------------------+----------------------------+
| 1970-01-01T00:47:00 | 4.998 |
+---------------------+----------------------------+

drop table histogram2_bucket;

@@ -284,12 +284,12 @@ Affected Rows: 12

tql eval (3000, 3005, '3s') histogram_quantile(0.5, sum by(le, s) (rate(histogram3_bucket[5m])));

+---+---------------------+-----------------------------------------------+
| s | ts | sum(prom_rate(ts_range,val,ts,Int64(300000))) |
+---+---------------------+-----------------------------------------------+
| a | 1970-01-01T00:50:00 | 0.55 |
| a | 1970-01-01T00:50:03 | 0.5500000000000002 |
+---+---------------------+-----------------------------------------------+
+---+---------------------+---------------------------------+
| s | ts | sum(prom_rate(ts_range,val,ts)) |
+---+---------------------+---------------------------------+
| a | 1970-01-01T00:50:00 | 0.55 |
| a | 1970-01-01T00:50:03 | 0.5500000000000002 |
+---+---------------------+---------------------------------+

drop table histogram3_bucket;

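As in the hunks above, bucket rates can be summed across the le label before histogram_quantile is applied; an illustrative query against the same histogram2_bucket table (result omitted):

tql eval (2820, 2820, '1s') histogram_quantile(0.9, sum by (le) (rate(histogram2_bucket[15m])));
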
@@ -45,19 +45,19 @@ tql eval (359, 359, '1s') sum_over_time(metric_total[60s:10s]);

tql eval (10, 10, '1s') rate(metric_total[20s:10s]);

+---------------------+-----------------------------------------+
| ts | prom_rate(ts_range,val,ts,Int64(20000)) |
+---------------------+-----------------------------------------+
| 1970-01-01T00:00:10 | 0.1 |
+---------------------+-----------------------------------------+
+---------------------+----------------------------+
| ts | prom_rate(ts_range,val,ts) |
+---------------------+----------------------------+
| 1970-01-01T00:00:10 | 0.1 |
+---------------------+----------------------------+

tql eval (20, 20, '1s') rate(metric_total[20s:5s]);

+---------------------+-----------------------------------------+
| ts | prom_rate(ts_range,val,ts,Int64(20000)) |
+---------------------+-----------------------------------------+
| 1970-01-01T00:00:20 | 0.06666666666666667 |
+---------------------+-----------------------------------------+
+---------------------+----------------------------+
| ts | prom_rate(ts_range,val,ts) |
+---------------------+----------------------------+
| 1970-01-01T00:00:20 | 0.06666666666666667 |
+---------------------+----------------------------+

drop table metric_total;

Some files were not shown because too many files have changed in this diff.