Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-07 05:42:57 +00:00)

Compare commits: 9 commits, v1.0.0-bet...feature/df
| Author | SHA1 | Date |
|---|---|---|
| | ef80503454 | |
| | 69f0249039 | |
| | 1f91422bae | |
| | 377373b8fd | |
| | e107030d85 | |
| | 18875eed4d | |
| | ee76d50569 | |
| | 5d634aeba0 | |
| | 30ca2d7652 | |
Cargo.lock (generated, 69 changed lines)
@@ -3274,7 +3274,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "arrow-ipc",
@@ -3329,7 +3329,7 @@ dependencies = [
[[package]]
name = "datafusion-catalog"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "async-trait",
@@ -3353,7 +3353,7 @@ dependencies = [
[[package]]
name = "datafusion-catalog-listing"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "async-trait",
@@ -3375,7 +3375,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "ahash 0.8.12",
 "arrow",
@@ -3398,7 +3398,7 @@ dependencies = [
[[package]]
name = "datafusion-common-runtime"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "futures",
 "log",
@@ -3408,7 +3408,7 @@ dependencies = [
[[package]]
name = "datafusion-datasource"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "async-compression 0.4.19",
@@ -3442,7 +3442,7 @@ dependencies = [
[[package]]
name = "datafusion-datasource-csv"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "async-trait",
@@ -3464,7 +3464,7 @@ dependencies = [
[[package]]
name = "datafusion-datasource-json"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "async-trait",
@@ -3485,7 +3485,7 @@ dependencies = [
[[package]]
name = "datafusion-datasource-parquet"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "async-trait",
@@ -3514,12 +3514,12 @@ dependencies = [
[[package]]
name = "datafusion-doc"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"

[[package]]
name = "datafusion-execution"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "async-trait",
@@ -3538,7 +3538,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "async-trait",
@@ -3560,7 +3560,7 @@ dependencies = [
[[package]]
name = "datafusion-expr-common"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "datafusion-common",
@@ -3572,7 +3572,7 @@ dependencies = [
[[package]]
name = "datafusion-functions"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "arrow-buffer",
@@ -3600,7 +3600,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-aggregate"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "ahash 0.8.12",
 "arrow",
@@ -3620,7 +3620,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-aggregate-common"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "ahash 0.8.12",
 "arrow",
@@ -3632,7 +3632,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-nested"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "arrow-ord",
@@ -3654,7 +3654,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-table"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "async-trait",
@@ -3669,7 +3669,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-window"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "datafusion-common",
@@ -3686,7 +3686,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-window-common"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "datafusion-common",
 "datafusion-physical-expr-common",
@@ -3695,7 +3695,7 @@ dependencies = [
[[package]]
name = "datafusion-macros"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "datafusion-doc",
 "quote",
@@ -3705,7 +3705,7 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "chrono",
@@ -3741,9 +3741,9 @@ dependencies = [

[[package]]
name = "datafusion-pg-catalog"
-version = "0.12.1"
+version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "15824c98ff2009c23b0398d441499b147f7c5ac0e5ee993e7a473d79040e3626"
+checksum = "755393864c0c2dd95575ceed4b25e348686028e1b83d06f8f39914209999f821"
dependencies = [
 "async-trait",
 "datafusion",
@@ -3756,7 +3756,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "ahash 0.8.12",
 "arrow",
@@ -3777,7 +3777,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr-adapter"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "datafusion-common",
@@ -3791,7 +3791,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr-common"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "ahash 0.8.12",
 "arrow",
@@ -3804,7 +3804,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-optimizer"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "datafusion-common",
@@ -3822,7 +3822,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-plan"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "ahash 0.8.12",
 "arrow",
@@ -3852,7 +3852,7 @@ dependencies = [
[[package]]
name = "datafusion-pruning"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "datafusion-common",
@@ -3868,7 +3868,7 @@ dependencies = [
[[package]]
name = "datafusion-session"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "async-trait",
 "datafusion-common",
@@ -3881,7 +3881,7 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "arrow",
 "bigdecimal 0.4.8",
@@ -3898,7 +3898,7 @@ dependencies = [
[[package]]
name = "datafusion-substrait"
version = "50.1.0"
-source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=fd4b2abcf3c3e43e94951bda452c9fd35243aab0#fd4b2abcf3c3e43e94951bda452c9fd35243aab0"
+source = "git+https://github.com/GreptimeTeam/datafusion.git?rev=7f8ea0a45748ed32695757368f847ab9ac7b6c82#7f8ea0a45748ed32695757368f847ab9ac7b6c82"
dependencies = [
 "async-recursion",
 "async-trait",
@@ -9501,6 +9501,7 @@ name = "plugins"
version = "1.0.0-beta.2"
dependencies = [
 "auth",
 "catalog",
 "clap 4.5.40",
 "cli",
 "common-base",
@@ -9509,6 +9510,7 @@ dependencies = [
 "datanode",
 "flow",
 "frontend",
 "meta-client",
 "meta-srv",
 "serde",
 "snafu 0.8.6",
@@ -13065,6 +13067,7 @@ dependencies = [
 "loki-proto",
 "meta-client",
 "meta-srv",
 "mito2",
 "moka",
 "mysql_async",
 "object-store",
Cargo.toml (26 changed lines)
@@ -131,7 +131,7 @@ datafusion-functions = "50"
datafusion-functions-aggregate-common = "50"
datafusion-optimizer = "50"
datafusion-orc = "0.5"
-datafusion-pg-catalog = "0.12.1"
+datafusion-pg-catalog = "0.12.2"
datafusion-physical-expr = "50"
datafusion-physical-plan = "50"
datafusion-sql = "50"
@@ -316,18 +316,18 @@ git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"

[patch.crates-io]
-datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-physical-expr-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-datasource = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
+datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-physical-expr-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-datasource = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "4b519a5caa95472cc3988f5556813a583dd35af1" } # branch = "v0.58.x"

[profile.release]
@@ -211,6 +211,7 @@ struct InformationSchemaPartitionsBuilder {
    partition_names: StringVectorBuilder,
    partition_ordinal_positions: Int64VectorBuilder,
+   partition_expressions: StringVectorBuilder,
    partition_descriptions: StringVectorBuilder,
    create_times: TimestampSecondVectorBuilder,
    partition_ids: UInt64VectorBuilder,
}
@@ -231,6 +232,7 @@ impl InformationSchemaPartitionsBuilder {
    partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
    partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+   partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
    partition_descriptions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
    create_times: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
    partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
    }
@@ -319,6 +321,21 @@ impl InformationSchemaPartitionsBuilder {
            return;
        }

+       // Get partition column names (shared by all partitions)
+       // In MySQL, PARTITION_EXPRESSION is the partitioning function expression (e.g., column name)
+       let partition_columns: String = table_info
+           .meta
+           .partition_column_names()
+           .cloned()
+           .collect::<Vec<_>>()
+           .join(", ");
+
+       let partition_expr_str = if partition_columns.is_empty() {
+           None
+       } else {
+           Some(partition_columns)
+       };
+
        for (index, partition) in partitions.iter().enumerate() {
            let partition_name = format!("p{index}");

@@ -328,8 +345,12 @@ impl InformationSchemaPartitionsBuilder {
        self.partition_names.push(Some(&partition_name));
        self.partition_ordinal_positions
            .push(Some((index + 1) as i64));
-       let expression = partition.partition_expr.as_ref().map(|e| e.to_string());
-       self.partition_expressions.push(expression.as_deref());
+       // PARTITION_EXPRESSION: partition column names (same for all partitions)
+       self.partition_expressions
+           .push(partition_expr_str.as_deref());
+       // PARTITION_DESCRIPTION: partition boundary expression (different for each partition)
+       let description = partition.partition_expr.as_ref().map(|e| e.to_string());
+       self.partition_descriptions.push(description.as_deref());
        self.create_times.push(Some(TimestampSecond::from(
            table_info.meta.created_on.timestamp(),
        )));
@@ -369,7 +390,7 @@ impl InformationSchemaPartitionsBuilder {
            null_string_vector.clone(),
+           Arc::new(self.partition_expressions.finish()),
            null_string_vector.clone(),
-           null_string_vector.clone(),
            Arc::new(self.partition_descriptions.finish()),
            // TODO(dennis): rows and index statistics info
            null_i64_vector.clone(),
            null_i64_vector.clone(),
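The hunks above split the MySQL-style semantics in two: PARTITION_EXPRESSION carries the table's partition column list (identical for every partition row), while PARTITION_DESCRIPTION carries each partition's boundary expression. A minimal, illustrative sketch of that string handling, using made-up column names and boundaries rather than real table metadata:

```rust
// Illustrative only: "device_id"/"area" and the boundary strings are hypothetical;
// the real values come from TableInfo metadata and the partition definitions.
fn main() {
    // PARTITION_EXPRESSION: partition columns joined once, reused for every row.
    let partition_columns = ["device_id", "area"].join(", ");
    let partition_expr_str = if partition_columns.is_empty() {
        None
    } else {
        Some(partition_columns)
    };
    assert_eq!(partition_expr_str.as_deref(), Some("device_id, area"));

    // PARTITION_DESCRIPTION: one boundary expression per partition (p0, p1, ...).
    let boundaries = ["device_id < 100", "device_id >= 100"];
    for (index, description) in boundaries.iter().enumerate() {
        println!(
            "p{index}: expression={:?}, description={description}",
            partition_expr_str.as_deref()
        );
    }
}
```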
@@ -18,7 +18,6 @@ use std::sync::Arc;
use std::time::Duration;

use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
-use catalog::CatalogManagerRef;
use catalog::information_extension::DistributedInformationExtension;
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManagerBuilder, MetaKvBackend};
use clap::Parser;
@@ -26,14 +25,12 @@ use client::client_manager::NodeClients;
use common_base::Plugins;
use common_config::{Configurable, DEFAULT_DATA_HOME};
use common_grpc::channel_manager::ChannelConfig;
-use common_meta::FlownodeId;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::key::TableMetadataManager;
use common_meta::key::flow::FlowMetadataManager;
-use common_meta::kv_backend::KvBackendRef;
use common_stat::ResourceStatImpl;
use common_telemetry::info;
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
@@ -43,6 +40,7 @@ use flow::{
    get_flow_auth_options,
};
use meta_client::{MetaClientOptions, MetaClientType};
+use plugins::flownode::context::GrpcConfigureContext;
use servers::configurator::GrpcBuilderConfiguratorRef;
use snafu::{OptionExt, ResultExt, ensure};
use tracing_appender::non_blocking::WorkerGuard;
@@ -435,11 +433,3 @@ impl StartCommand {
        Ok(Instance::new(flownode, guard))
    }
}
-
-/// The context for [`GrpcBuilderConfiguratorRef`] in flownode.
-pub struct GrpcConfigureContext {
-    pub kv_backend: KvBackendRef,
-    pub fe_client: Arc<FrontendClient>,
-    pub flownode_id: FlownodeId,
-    pub catalog_manager: CatalogManagerRef,
-}
@@ -45,7 +45,10 @@ use frontend::frontend::Frontend;
use frontend::heartbeat::HeartbeatTask;
use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
-use meta_client::{MetaClientOptions, MetaClientRef, MetaClientType};
+use meta_client::{MetaClientOptions, MetaClientType};
+use plugins::frontend::context::{
+    CatalogManagerConfigureContext, DistributedCatalogManagerConfigureContext,
+};
use servers::addrs;
use servers::grpc::GrpcOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -423,9 +426,11 @@ impl StartCommand {
        let builder = if let Some(configurator) =
            plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
        {
-           let ctx = CatalogManagerConfigureContext {
+           let ctx = DistributedCatalogManagerConfigureContext {
                meta_client: meta_client.clone(),
            };
+           let ctx = CatalogManagerConfigureContext::Distributed(ctx);

            configurator
                .configure(builder, ctx)
                .await
@@ -482,11 +487,6 @@ impl StartCommand {
    }
}

-/// The context for [`CatalogManagerConfigratorRef`] in frontend.
-pub struct CatalogManagerConfigureContext {
-    pub meta_client: MetaClientRef,
-}

#[cfg(test)]
mod tests {
    use std::io::Write;
@@ -32,7 +32,7 @@ use common_meta::cache::LayeredCacheRegistryBuilder;
use common_meta::ddl::flow_meta::FlowMetadataAllocator;
use common_meta::ddl::table_meta::TableMetadataAllocator;
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl};
-use common_meta::ddl_manager::{DdlManager, DdlManagerConfiguratorRef, DdlManagerConfigureContext};
+use common_meta::ddl_manager::{DdlManager, DdlManagerConfiguratorRef};
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
@@ -58,6 +58,10 @@ use frontend::instance::StandaloneDatanodeManager;
use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
+use plugins::frontend::context::{
+    CatalogManagerConfigureContext, StandaloneCatalogManagerConfigureContext,
+};
+use plugins::standalone::context::DdlManagerConfigureContext;
use servers::tls::{TlsMode, TlsOption};
use snafu::ResultExt;
use standalone::StandaloneInformationExtension;
@@ -414,9 +418,10 @@ impl StartCommand {
        let builder = if let Some(configurator) =
            plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
        {
-           let ctx = CatalogManagerConfigureContext {
+           let ctx = StandaloneCatalogManagerConfigureContext {
                fe_client: frontend_client.clone(),
            };
+           let ctx = CatalogManagerConfigureContext::Standalone(ctx);
            configurator
                .configure(builder, ctx)
                .await
@@ -506,9 +511,13 @@ impl StartCommand {
        let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager.clone(), true)
            .context(error::InitDdlManagerSnafu)?;

-       let ddl_manager = if let Some(configurator) = plugins.get::<DdlManagerConfiguratorRef>() {
+       let ddl_manager = if let Some(configurator) =
+           plugins.get::<DdlManagerConfiguratorRef<DdlManagerConfigureContext>>()
+       {
            let ctx = DdlManagerConfigureContext {
                kv_backend: kv_backend.clone(),
+               fe_client: frontend_client.clone(),
+               catalog_manager: catalog_manager.clone(),
            };
            configurator
                .configure(ddl_manager, ctx)
@@ -595,11 +604,6 @@ impl StartCommand {
    }
}

-/// The context for [`CatalogManagerConfigratorRef`] in standalone.
-pub struct CatalogManagerConfigureContext {
-    pub fe_client: Arc<FrontendClient>,
-}

#[cfg(test)]
mod tests {
    use std::default::Default;
@@ -32,7 +32,12 @@ impl Plugins {

    pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
        let last = self.write().insert(value);
-       assert!(last.is_none(), "each type of plugins must be one and only");
+       if last.is_some() {
+           panic!(
+               "Plugin of type {} already exists",
+               std::any::type_name::<T>()
+           );
+       }
    }

    pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
@@ -140,7 +145,7 @@ mod tests {
    }

    #[test]
-   #[should_panic(expected = "each type of plugins must be one and only")]
+   #[should_panic(expected = "Plugin of type i32 already exists")]
    fn test_plugin_uniqueness() {
        let plugins = Plugins::new();
        plugins.insert(1i32);
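Plugins::insert keeps its one-value-per-type guarantee after this change; only the panic message changes, so a duplicate registration now names the offending type, which is exactly what the updated should_panic test asserts. A rough usage sketch, assuming the Plugins API shown above:

```rust
use common_base::Plugins;

fn main() {
    let plugins = Plugins::new();
    plugins.insert(1i32);

    // A second value of the same type would panic with
    // "Plugin of type i32 already exists" instead of the old generic assertion text.
    // plugins.insert(2i32); // would panic

    let value: Option<i32> = plugins.get::<i32>();
    assert_eq!(value, Some(1));
}
```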
@@ -14,6 +14,7 @@

mod binary;
mod ctx;
+mod if_func;
mod is_null;
mod unary;

@@ -22,6 +23,7 @@ pub use ctx::EvalContext;
pub use unary::scalar_unary_op;

use crate::function_registry::FunctionRegistry;
+use crate::scalars::expression::if_func::IfFunction;
use crate::scalars::expression::is_null::IsNullFunction;

pub(crate) struct ExpressionFunction;
@@ -29,5 +31,6 @@ pub(crate) struct ExpressionFunction;
impl ExpressionFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register_scalar(IsNullFunction::default());
+       registry.register_scalar(IfFunction::default());
    }
}
src/common/function/src/scalars/expression/if_func.rs (new file, 404 lines)
@@ -0,0 +1,404 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;
use std::fmt::Display;

use arrow::array::ArrowNativeTypeOp;
use arrow::datatypes::ArrowPrimitiveType;
use datafusion::arrow::array::{Array, ArrayRef, AsArray, BooleanArray, PrimitiveArray};
use datafusion::arrow::compute::kernels::zip::zip;
use datafusion::arrow::datatypes::DataType;
use datafusion_common::DataFusionError;
use datafusion_expr::type_coercion::binary::comparison_coercion;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, Volatility};

use crate::function::Function;

const NAME: &str = "if";

/// MySQL-compatible IF function: IF(condition, true_value, false_value)
///
/// Returns true_value if condition is TRUE (not NULL and not 0),
/// otherwise returns false_value.
///
/// MySQL truthy rules:
/// - NULL -> false
/// - 0 (numeric zero) -> false
/// - Any non-zero numeric -> true
/// - Boolean true/false -> use directly
#[derive(Clone, Debug)]
pub struct IfFunction {
    signature: Signature,
}

impl Default for IfFunction {
    fn default() -> Self {
        Self {
            signature: Signature::any(3, Volatility::Immutable),
        }
    }
}

impl Display for IfFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", NAME.to_ascii_uppercase())
    }
}

impl Function for IfFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, input_types: &[DataType]) -> datafusion_common::Result<DataType> {
        // Return the common type of true_value and false_value (args[1] and args[2])
        if input_types.len() < 3 {
            return Err(DataFusionError::Plan(format!(
                "{} requires 3 arguments, got {}",
                NAME,
                input_types.len()
            )));
        }
        let true_type = &input_types[1];
        let false_type = &input_types[2];

        // Use comparison_coercion to find common type
        comparison_coercion(true_type, false_type).ok_or_else(|| {
            DataFusionError::Plan(format!(
                "Cannot find common type for IF function between {:?} and {:?}",
                true_type, false_type
            ))
        })
    }

    fn signature(&self) -> &Signature {
        &self.signature
    }

    fn invoke_with_args(
        &self,
        args: ScalarFunctionArgs,
    ) -> datafusion_common::Result<ColumnarValue> {
        if args.args.len() != 3 {
            return Err(DataFusionError::Plan(format!(
                "{} requires exactly 3 arguments, got {}",
                NAME,
                args.args.len()
            )));
        }

        let condition = &args.args[0];
        let true_value = &args.args[1];
        let false_value = &args.args[2];

        // Convert condition to boolean array using MySQL truthy rules
        let bool_array = to_boolean_array(condition, args.number_rows)?;

        // Convert true and false values to arrays
        let true_array = true_value.to_array(args.number_rows)?;
        let false_array = false_value.to_array(args.number_rows)?;

        // Use zip to select values based on condition
        // zip expects &dyn Datum, and ArrayRef (Arc<dyn Array>) implements Datum
        let result = zip(&bool_array, &true_array, &false_array)?;
        Ok(ColumnarValue::Array(result))
    }
}

/// Convert a ColumnarValue to a BooleanArray using MySQL truthy rules:
/// - NULL -> false
/// - 0 (any numeric zero) -> false
/// - Non-zero numeric -> true
/// - Boolean -> use directly
fn to_boolean_array(
    value: &ColumnarValue,
    num_rows: usize,
) -> datafusion_common::Result<BooleanArray> {
    let array = value.to_array(num_rows)?;
    array_to_bool(array)
}

/// Convert an integer PrimitiveArray to BooleanArray using MySQL truthy rules:
/// NULL -> false, 0 -> false, non-zero -> true
fn int_array_to_bool<T>(array: &PrimitiveArray<T>) -> BooleanArray
where
    T: ArrowPrimitiveType,
    T::Native: ArrowNativeTypeOp,
{
    BooleanArray::from_iter(
        array
            .iter()
            .map(|opt| Some(opt.is_some_and(|v| !v.is_zero()))),
    )
}

/// Convert a float PrimitiveArray to BooleanArray using MySQL truthy rules:
/// NULL -> false, 0 (including -0.0) -> false, NaN -> true, other non-zero -> true
fn float_array_to_bool<T>(array: &PrimitiveArray<T>) -> BooleanArray
where
    T: ArrowPrimitiveType,
    T::Native: ArrowNativeTypeOp + num_traits::Float,
{
    use num_traits::Float;
    BooleanArray::from_iter(
        array
            .iter()
            .map(|opt| Some(opt.is_some_and(|v| v.is_nan() || !v.is_zero()))),
    )
}

/// Convert an Array to BooleanArray using MySQL truthy rules
fn array_to_bool(array: ArrayRef) -> datafusion_common::Result<BooleanArray> {
    use arrow::datatypes::*;

    match array.data_type() {
        DataType::Boolean => {
            let bool_array = array.as_boolean();
            Ok(BooleanArray::from_iter(
                bool_array.iter().map(|opt| Some(opt.unwrap_or(false))),
            ))
        }
        DataType::Int8 => Ok(int_array_to_bool(array.as_primitive::<Int8Type>())),
        DataType::Int16 => Ok(int_array_to_bool(array.as_primitive::<Int16Type>())),
        DataType::Int32 => Ok(int_array_to_bool(array.as_primitive::<Int32Type>())),
        DataType::Int64 => Ok(int_array_to_bool(array.as_primitive::<Int64Type>())),
        DataType::UInt8 => Ok(int_array_to_bool(array.as_primitive::<UInt8Type>())),
        DataType::UInt16 => Ok(int_array_to_bool(array.as_primitive::<UInt16Type>())),
        DataType::UInt32 => Ok(int_array_to_bool(array.as_primitive::<UInt32Type>())),
        DataType::UInt64 => Ok(int_array_to_bool(array.as_primitive::<UInt64Type>())),
        // Float16 needs special handling since half::f16 doesn't implement num_traits::Float
        DataType::Float16 => {
            let typed_array = array.as_primitive::<Float16Type>();
            Ok(BooleanArray::from_iter(typed_array.iter().map(|opt| {
                Some(opt.is_some_and(|v| {
                    let f = v.to_f32();
                    f.is_nan() || !f.is_zero()
                }))
            })))
        }
        DataType::Float32 => Ok(float_array_to_bool(array.as_primitive::<Float32Type>())),
        DataType::Float64 => Ok(float_array_to_bool(array.as_primitive::<Float64Type>())),
        // Null type is always false.
        // Note: NullArray::is_null() returns false (physical null), so we must handle it explicitly.
        // See: https://github.com/apache/arrow-rs/issues/4840
        DataType::Null => Ok(BooleanArray::from(vec![false; array.len()])),
        // For other types, treat non-null as true
        _ => {
            let len = array.len();
            Ok(BooleanArray::from_iter(
                (0..len).map(|i| Some(!array.is_null(i))),
            ))
        }
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use arrow_schema::Field;
    use datafusion_common::ScalarValue;
    use datafusion_common::arrow::array::{AsArray, Int32Array, StringArray};

    use super::*;

    #[test]
    fn test_if_function_basic() {
        let if_func = IfFunction::default();
        assert_eq!("if", if_func.name());

        // Test IF(true, 'yes', 'no') -> 'yes'
        let result = if_func
            .invoke_with_args(ScalarFunctionArgs {
                args: vec![
                    ColumnarValue::Scalar(ScalarValue::Boolean(Some(true))),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
                ],
                arg_fields: vec![],
                number_rows: 1,
                return_field: Arc::new(Field::new("", DataType::Utf8, true)),
                config_options: Arc::new(Default::default()),
            })
            .unwrap();

        if let ColumnarValue::Array(arr) = result {
            let str_arr = arr.as_string::<i32>();
            assert_eq!(str_arr.value(0), "yes");
        } else {
            panic!("Expected Array result");
        }
    }

    #[test]
    fn test_if_function_false() {
        let if_func = IfFunction::default();

        // Test IF(false, 'yes', 'no') -> 'no'
        let result = if_func
            .invoke_with_args(ScalarFunctionArgs {
                args: vec![
                    ColumnarValue::Scalar(ScalarValue::Boolean(Some(false))),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
                ],
                arg_fields: vec![],
                number_rows: 1,
                return_field: Arc::new(Field::new("", DataType::Utf8, true)),
                config_options: Arc::new(Default::default()),
            })
            .unwrap();

        if let ColumnarValue::Array(arr) = result {
            let str_arr = arr.as_string::<i32>();
            assert_eq!(str_arr.value(0), "no");
        } else {
            panic!("Expected Array result");
        }
    }

    #[test]
    fn test_if_function_null_is_false() {
        let if_func = IfFunction::default();

        // Test IF(NULL, 'yes', 'no') -> 'no' (NULL is treated as false)
        // Using Boolean(None) - typed null
        let result = if_func
            .invoke_with_args(ScalarFunctionArgs {
                args: vec![
                    ColumnarValue::Scalar(ScalarValue::Boolean(None)),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
                ],
                arg_fields: vec![],
                number_rows: 1,
                return_field: Arc::new(Field::new("", DataType::Utf8, true)),
                config_options: Arc::new(Default::default()),
            })
            .unwrap();

        if let ColumnarValue::Array(arr) = result {
            let str_arr = arr.as_string::<i32>();
            assert_eq!(str_arr.value(0), "no");
        } else {
            panic!("Expected Array result");
        }

        // Test IF(NULL, 'yes', 'no') -> 'no' using ScalarValue::Null (untyped null from SQL NULL literal)
        let result = if_func
            .invoke_with_args(ScalarFunctionArgs {
                args: vec![
                    ColumnarValue::Scalar(ScalarValue::Null),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
                ],
                arg_fields: vec![],
                number_rows: 1,
                return_field: Arc::new(Field::new("", DataType::Utf8, true)),
                config_options: Arc::new(Default::default()),
            })
            .unwrap();

        if let ColumnarValue::Array(arr) = result {
            let str_arr = arr.as_string::<i32>();
            assert_eq!(str_arr.value(0), "no");
        } else {
            panic!("Expected Array result");
        }
    }

    #[test]
    fn test_if_function_numeric_truthy() {
        let if_func = IfFunction::default();

        // Test IF(1, 'yes', 'no') -> 'yes' (non-zero is true)
        let result = if_func
            .invoke_with_args(ScalarFunctionArgs {
                args: vec![
                    ColumnarValue::Scalar(ScalarValue::Int32(Some(1))),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
                ],
                arg_fields: vec![],
                number_rows: 1,
                return_field: Arc::new(Field::new("", DataType::Utf8, true)),
                config_options: Arc::new(Default::default()),
            })
            .unwrap();

        if let ColumnarValue::Array(arr) = result {
            let str_arr = arr.as_string::<i32>();
            assert_eq!(str_arr.value(0), "yes");
        } else {
            panic!("Expected Array result");
        }

        // Test IF(0, 'yes', 'no') -> 'no' (zero is false)
        let result = if_func
            .invoke_with_args(ScalarFunctionArgs {
                args: vec![
                    ColumnarValue::Scalar(ScalarValue::Int32(Some(0))),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
                ],
                arg_fields: vec![],
                number_rows: 1,
                return_field: Arc::new(Field::new("", DataType::Utf8, true)),
                config_options: Arc::new(Default::default()),
            })
            .unwrap();

        if let ColumnarValue::Array(arr) = result {
            let str_arr = arr.as_string::<i32>();
            assert_eq!(str_arr.value(0), "no");
        } else {
            panic!("Expected Array result");
        }
    }

    #[test]
    fn test_if_function_with_arrays() {
        let if_func = IfFunction::default();

        // Test with array condition
        let condition = Int32Array::from(vec![Some(1), Some(0), None, Some(5)]);
        let true_val = StringArray::from(vec!["yes", "yes", "yes", "yes"]);
        let false_val = StringArray::from(vec!["no", "no", "no", "no"]);

        let result = if_func
            .invoke_with_args(ScalarFunctionArgs {
                args: vec![
                    ColumnarValue::Array(Arc::new(condition)),
                    ColumnarValue::Array(Arc::new(true_val)),
                    ColumnarValue::Array(Arc::new(false_val)),
                ],
                arg_fields: vec![],
                number_rows: 4,
                return_field: Arc::new(Field::new("", DataType::Utf8, true)),
                config_options: Arc::new(Default::default()),
            })
            .unwrap();

        if let ColumnarValue::Array(arr) = result {
            let str_arr = arr.as_string::<i32>();
            assert_eq!(str_arr.value(0), "yes"); // 1 is true
            assert_eq!(str_arr.value(1), "no"); // 0 is false
            assert_eq!(str_arr.value(2), "no"); // NULL is false
            assert_eq!(str_arr.value(3), "yes"); // 5 is true
        } else {
            panic!("Expected Array result");
        }
    }
}
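The new tests cover booleans, NULLs, and integers. A hedged sketch of one more test for the float rules documented above (0.0 is falsy, NaN is truthy); it is not part of the diff, but it reuses the exact ScalarFunctionArgs shape the existing tests use:

```rust
// Sketch only: assumes it sits inside the same tests module as the cases above.
#[test]
fn test_if_function_float_truthy() {
    let if_func = IfFunction::default();

    // IF(0.0, 'yes', 'no') -> 'no'; IF(NaN, 'yes', 'no') -> 'yes'
    for (cond, expected) in [(0.0_f64, "no"), (f64::NAN, "yes")] {
        let result = if_func
            .invoke_with_args(ScalarFunctionArgs {
                args: vec![
                    ColumnarValue::Scalar(ScalarValue::Float64(Some(cond))),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
                    ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
                ],
                arg_fields: vec![],
                number_rows: 1,
                return_field: Arc::new(Field::new("", DataType::Utf8, true)),
                config_options: Arc::new(Default::default()),
            })
            .unwrap();

        if let ColumnarValue::Array(arr) = result {
            assert_eq!(arr.as_string::<i32>().value(0), expected);
        } else {
            panic!("Expected Array result");
        }
    }
}
```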
@@ -17,7 +17,7 @@ use std::sync::Arc;
use common_catalog::consts::{
    DEFAULT_PRIVATE_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
};
-use datafusion::arrow::array::{ArrayRef, StringArray, as_boolean_array};
+use datafusion::arrow::array::{ArrayRef, StringArray, StringBuilder, as_boolean_array};
use datafusion::catalog::TableFunction;
use datafusion::common::ScalarValue;
use datafusion::common::utils::SingleRowListArrayBuilder;
@@ -34,10 +34,15 @@ const CURRENT_SCHEMA_FUNCTION_NAME: &str = "current_schema";
const CURRENT_SCHEMAS_FUNCTION_NAME: &str = "current_schemas";
const SESSION_USER_FUNCTION_NAME: &str = "session_user";
const CURRENT_DATABASE_FUNCTION_NAME: &str = "current_database";
+const OBJ_DESCRIPTION_FUNCTION_NAME: &str = "obj_description";
+const COL_DESCRIPTION_FUNCTION_NAME: &str = "col_description";
+const SHOBJ_DESCRIPTION_FUNCTION_NAME: &str = "shobj_description";
+const PG_MY_TEMP_SCHEMA_FUNCTION_NAME: &str = "pg_my_temp_schema";

define_nullary_udf!(CurrentSchemaFunction);
define_nullary_udf!(SessionUserFunction);
define_nullary_udf!(CurrentDatabaseFunction);
+define_nullary_udf!(PgMyTempSchemaFunction);

impl Function for CurrentDatabaseFunction {
    fn name(&self) -> &str {
@@ -173,6 +178,175 @@ impl Function for CurrentSchemasFunction {
    }
}

/// PostgreSQL obj_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ObjDescriptionFunction {
    signature: Signature,
}

impl ObjDescriptionFunction {
    pub fn new() -> Self {
        Self {
            signature: Signature::one_of(
                vec![
                    TypeSignature::Exact(vec![DataType::Int64, DataType::Utf8]),
                    TypeSignature::Exact(vec![DataType::UInt32, DataType::Utf8]),
                    TypeSignature::Exact(vec![DataType::Int64]),
                    TypeSignature::Exact(vec![DataType::UInt32]),
                ],
                Volatility::Stable,
            ),
        }
    }
}

impl Function for ObjDescriptionFunction {
    fn name(&self) -> &str {
        OBJ_DESCRIPTION_FUNCTION_NAME
    }

    fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
        Ok(DataType::Utf8)
    }

    fn signature(&self) -> &Signature {
        &self.signature
    }

    fn invoke_with_args(
        &self,
        args: ScalarFunctionArgs,
    ) -> datafusion_common::Result<ColumnarValue> {
        let num_rows = args.number_rows;
        let mut builder = StringBuilder::with_capacity(num_rows, 0);
        for _ in 0..num_rows {
            builder.append_null();
        }
        Ok(ColumnarValue::Array(Arc::new(builder.finish())))
    }
}

/// PostgreSQL col_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ColDescriptionFunction {
    signature: Signature,
}

impl ColDescriptionFunction {
    pub fn new() -> Self {
        Self {
            signature: Signature::one_of(
                vec![
                    TypeSignature::Exact(vec![DataType::Int64, DataType::Int32]),
                    TypeSignature::Exact(vec![DataType::UInt32, DataType::Int32]),
                    TypeSignature::Exact(vec![DataType::Int64, DataType::Int64]),
                    TypeSignature::Exact(vec![DataType::UInt32, DataType::Int64]),
                ],
                Volatility::Stable,
            ),
        }
    }
}

impl Function for ColDescriptionFunction {
    fn name(&self) -> &str {
        COL_DESCRIPTION_FUNCTION_NAME
    }

    fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
        Ok(DataType::Utf8)
    }

    fn signature(&self) -> &Signature {
        &self.signature
    }

    fn invoke_with_args(
        &self,
        args: ScalarFunctionArgs,
    ) -> datafusion_common::Result<ColumnarValue> {
        let num_rows = args.number_rows;
        let mut builder = StringBuilder::with_capacity(num_rows, 0);
        for _ in 0..num_rows {
            builder.append_null();
        }
        Ok(ColumnarValue::Array(Arc::new(builder.finish())))
    }
}

/// PostgreSQL shobj_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ShobjDescriptionFunction {
    signature: Signature,
}

impl ShobjDescriptionFunction {
    pub fn new() -> Self {
        Self {
            signature: Signature::one_of(
                vec![
                    TypeSignature::Exact(vec![DataType::Int64, DataType::Utf8]),
                    TypeSignature::Exact(vec![DataType::UInt64, DataType::Utf8]),
                    TypeSignature::Exact(vec![DataType::Int32, DataType::Utf8]),
                    TypeSignature::Exact(vec![DataType::UInt32, DataType::Utf8]),
                ],
                Volatility::Stable,
            ),
        }
    }
}

impl Function for ShobjDescriptionFunction {
    fn name(&self) -> &str {
        SHOBJ_DESCRIPTION_FUNCTION_NAME
    }

    fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
        Ok(DataType::Utf8)
    }

    fn signature(&self) -> &Signature {
        &self.signature
    }

    fn invoke_with_args(
        &self,
        args: ScalarFunctionArgs,
    ) -> datafusion_common::Result<ColumnarValue> {
        let num_rows = args.number_rows;
        let mut builder = StringBuilder::with_capacity(num_rows, 0);
        for _ in 0..num_rows {
            builder.append_null();
        }
        Ok(ColumnarValue::Array(Arc::new(builder.finish())))
    }
}

/// PostgreSQL pg_my_temp_schema - returns 0 (no temp schema) for compatibility
impl Function for PgMyTempSchemaFunction {
    fn name(&self) -> &str {
        PG_MY_TEMP_SCHEMA_FUNCTION_NAME
    }

    fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
        Ok(DataType::UInt32)
    }

    fn signature(&self) -> &Signature {
        &self.signature
    }

    fn invoke_with_args(
        &self,
        _args: ScalarFunctionArgs,
    ) -> datafusion_common::Result<ColumnarValue> {
        Ok(ColumnarValue::Scalar(ScalarValue::UInt32(Some(0))))
    }
}

pub(super) struct PGCatalogFunction;

impl PGCatalogFunction {
@@ -212,5 +386,98 @@ impl PGCatalogFunction {
    registry.register(pg_catalog::create_pg_total_relation_size_udf());
    registry.register(pg_catalog::create_pg_stat_get_numscans());
    registry.register(pg_catalog::create_pg_get_constraintdef());
    registry.register(pg_catalog::create_pg_get_partition_ancestors_udf());
+   registry.register_scalar(ObjDescriptionFunction::new());
+   registry.register_scalar(ColDescriptionFunction::new());
+   registry.register_scalar(ShobjDescriptionFunction::new());
+   registry.register_scalar(PgMyTempSchemaFunction::default());
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use arrow_schema::Field;
    use datafusion::arrow::array::Array;
    use datafusion_common::ScalarValue;
    use datafusion_expr::ColumnarValue;

    use super::*;

    fn create_test_args(args: Vec<ColumnarValue>, number_rows: usize) -> ScalarFunctionArgs {
        ScalarFunctionArgs {
            args,
            arg_fields: vec![],
            number_rows,
            return_field: Arc::new(Field::new("result", DataType::Utf8, true)),
            config_options: Arc::new(Default::default()),
        }
    }

    #[test]
    fn test_obj_description_function() {
        let func = ObjDescriptionFunction::new();
        assert_eq!("obj_description", func.name());
        assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());

        let args = create_test_args(
            vec![
                ColumnarValue::Scalar(ScalarValue::Int64(Some(1234))),
                ColumnarValue::Scalar(ScalarValue::Utf8(Some("pg_class".to_string()))),
            ],
            1,
        );
        let result = func.invoke_with_args(args).unwrap();
        if let ColumnarValue::Array(arr) = result {
            assert_eq!(1, arr.len());
            assert!(arr.is_null(0));
        } else {
            panic!("Expected Array result");
        }
    }

    #[test]
    fn test_col_description_function() {
        let func = ColDescriptionFunction::new();
        assert_eq!("col_description", func.name());
        assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());

        let args = create_test_args(
            vec![
                ColumnarValue::Scalar(ScalarValue::Int64(Some(1234))),
                ColumnarValue::Scalar(ScalarValue::Int64(Some(1))),
            ],
            1,
        );
        let result = func.invoke_with_args(args).unwrap();
        if let ColumnarValue::Array(arr) = result {
            assert_eq!(1, arr.len());
            assert!(arr.is_null(0));
        } else {
            panic!("Expected Array result");
        }
    }

    #[test]
    fn test_shobj_description_function() {
        let func = ShobjDescriptionFunction::new();
        assert_eq!("shobj_description", func.name());
        assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());

        let args = create_test_args(
            vec![
                ColumnarValue::Scalar(ScalarValue::Int64(Some(1))),
                ColumnarValue::Scalar(ScalarValue::Utf8(Some("pg_database".to_string()))),
            ],
            1,
        );
        let result = func.invoke_with_args(args).unwrap();
        if let ColumnarValue::Array(arr) = result {
            assert_eq!(1, arr.len());
            assert!(arr.is_null(0));
        } else {
            panic!("Expected Array result");
        }
    }
}
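The three description functions above always return an all-NULL string column, and pg_my_temp_schema (registered above but not covered by the new tests) returns the OID 0 to signal that no temporary schema exists. A hedged sketch of how a pg_my_temp_schema test could look, reusing the create_test_args helper from the test module above:

```rust
// Sketch only: assumes PgMyTempSchemaFunction::default() is available via the
// define_nullary_udf! macro, as the registration code above suggests.
#[test]
fn test_pg_my_temp_schema_function() {
    let func = PgMyTempSchemaFunction::default();
    assert_eq!("pg_my_temp_schema", func.name());
    assert_eq!(DataType::UInt32, func.return_type(&[]).unwrap());

    let args = create_test_args(vec![], 1);
    let result = func.invoke_with_args(args).unwrap();
    if let ColumnarValue::Scalar(ScalarValue::UInt32(value)) = result {
        // 0 means "no temporary schema", mirroring PostgreSQL's behavior for a
        // session that has never created one.
        assert_eq!(value, Some(0));
    } else {
        panic!("Expected scalar UInt32 result");
    }
}
```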
@@ -46,7 +46,6 @@ use crate::error::{
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::procedure_executor::ExecutorContext;
|
||||
#[cfg(feature = "enterprise")]
|
||||
use crate::rpc::ddl::DdlTask::CreateTrigger;
|
||||
@@ -70,20 +69,16 @@ use crate::rpc::router::RegionRoute;
|
||||
|
||||
/// A configurator that customizes or enhances a [`DdlManager`].
|
||||
#[async_trait::async_trait]
|
||||
pub trait DdlManagerConfigurator: Send + Sync {
|
||||
pub trait DdlManagerConfigurator<C>: Send + Sync {
|
||||
/// Configures the given [`DdlManager`] using the provided [`DdlManagerConfigureContext`].
|
||||
async fn configure(
|
||||
&self,
|
||||
ddl_manager: DdlManager,
|
||||
ctx: DdlManagerConfigureContext,
|
||||
ctx: C,
|
||||
) -> std::result::Result<DdlManager, BoxedError>;
|
||||
}
|
||||
|
||||
pub type DdlManagerConfiguratorRef = Arc<dyn DdlManagerConfigurator>;
|
||||
|
||||
pub struct DdlManagerConfigureContext {
|
||||
pub kv_backend: KvBackendRef,
|
||||
}
|
||||
pub type DdlManagerConfiguratorRef<C> = Arc<dyn DdlManagerConfigurator<C>>;
|
||||
|
||||
pub type DdlManagerRef = Arc<DdlManager>;
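The configurator trait above is now generic over its context type. Below is a minimal, hedged sketch of a plugin implementing it, assuming the metasrv-side DdlManagerConfigureContext introduced later in this diff; MyDdlPlugin and its body are hypothetical and only the trait shape comes from the change itself.

// Hypothetical plugin; only the trait signature is taken from this diff.
struct MyDdlPlugin;

#[async_trait::async_trait]
impl DdlManagerConfigurator<DdlManagerConfigureContext> for MyDdlPlugin {
    async fn configure(
        &self,
        ddl_manager: DdlManager,
        ctx: DdlManagerConfigureContext,
    ) -> std::result::Result<DdlManager, BoxedError> {
        // Inspect the provided context (e.g. its kv_backend) before handing the manager back.
        let _ = &ctx.kv_backend;
        Ok(ddl_manager)
    }
}

A plugin shaped like this is what the MetasrvBuilder looks up via p.get::<DdlManagerConfiguratorRef<DdlManagerConfigureContext>>() in the hunk further down.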
|
||||
|
||||
|
||||
@@ -339,6 +339,16 @@ pub struct FlushRegions {
|
||||
pub error_strategy: FlushErrorStrategy,
|
||||
}
|
||||
|
||||
impl Display for FlushRegions {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"FlushRegions(region_ids={:?}, strategy={:?}, error_strategy={:?})",
|
||||
self.region_ids, self.strategy, self.error_strategy
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl FlushRegions {
|
||||
/// Create synchronous single-region flush
|
||||
pub fn sync_single(region_id: RegionId) -> Self {
|
||||
|
||||
@@ -246,14 +246,6 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Loader for {type_name} is not implemented: {reason}"))]
|
||||
ProcedureLoaderNotImplemented {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
type_name: String,
|
||||
reason: String,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -274,8 +266,7 @@ impl ErrorExt for Error {
|
||||
Error::ToJson { .. }
|
||||
| Error::DeleteState { .. }
|
||||
| Error::FromJson { .. }
|
||||
| Error::WaitWatcher { .. }
|
||||
| Error::ProcedureLoaderNotImplemented { .. } => StatusCode::Internal,
|
||||
| Error::WaitWatcher { .. } => StatusCode::Internal,
|
||||
|
||||
Error::RetryTimesExceeded { .. }
|
||||
| Error::RollbackTimesExceeded { .. }
|
||||
|
||||
@@ -320,4 +320,15 @@ mod tests {
|
||||
assert!(flush_reply.results[0].1.is_ok());
|
||||
assert!(flush_reply.results[1].1.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_flush_regions_display() {
|
||||
let region_id = RegionId::new(1024, 1);
|
||||
let flush_regions = FlushRegions::sync_single(region_id);
|
||||
let display = format!("{}", flush_regions);
|
||||
assert_eq!(
|
||||
display,
|
||||
"FlushRegions(region_ids=[4398046511105(1024, 1)], strategy=Sync, error_strategy=FailFast)"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,9 +28,10 @@ mod procedure;
|
||||
mod scheduler;
|
||||
mod tracker;
|
||||
|
||||
pub(crate) use options::GcSchedulerOptions;
|
||||
pub use options::GcSchedulerOptions;
|
||||
pub use procedure::BatchGcProcedure;
|
||||
pub(crate) use scheduler::{GcScheduler, GcTickerRef};
|
||||
|
||||
pub(crate) type Region2Peers = HashMap<RegionId, (Peer, Vec<Peer>)>;
|
||||
pub type Region2Peers = HashMap<RegionId, (Peer, Vec<Peer>)>;
|
||||
|
||||
pub(crate) type Peer2Regions = HashMap<Peer, HashSet<RegionId>>;
|
||||
|
||||
@@ -84,44 +84,6 @@ impl DefaultGcSchedulerCtx {
|
||||
mailbox: MailboxRef,
|
||||
server_addr: String,
|
||||
) -> Result<Self> {
|
||||
// register a noop loader for `GcRegionProcedure` to avoid error when deserializing procedure when rebooting
|
||||
|
||||
procedure_manager
|
||||
.register_loader(
|
||||
GcRegionProcedure::TYPE_NAME,
|
||||
Box::new(move |json| {
|
||||
common_procedure::error::ProcedureLoaderNotImplementedSnafu {
|
||||
type_name: GcRegionProcedure::TYPE_NAME.to_string(),
|
||||
reason:
|
||||
"GC procedure should be retried by scheduler, not reloaded from storage"
|
||||
.to_string(),
|
||||
}
|
||||
.fail()
|
||||
}),
|
||||
)
|
||||
.context(error::RegisterProcedureLoaderSnafu {
|
||||
type_name: GcRegionProcedure::TYPE_NAME,
|
||||
})?;
|
||||
|
||||
// register a noop loader for `BatchGcProcedure` to avoid error when deserializing procedure when rebooting
|
||||
|
||||
procedure_manager
|
||||
.register_loader(
|
||||
BatchGcProcedure::TYPE_NAME,
|
||||
Box::new(move |json| {
|
||||
common_procedure::error::ProcedureLoaderNotImplementedSnafu {
|
||||
type_name: BatchGcProcedure::TYPE_NAME.to_string(),
|
||||
reason:
|
||||
"Batch GC procedure should not be reloaded from storage, as it doesn't need to be retried if interrupted"
|
||||
.to_string(),
|
||||
}
|
||||
.fail()
|
||||
}),
|
||||
)
|
||||
.context(error::RegisterProcedureLoaderSnafu {
|
||||
type_name: BatchGcProcedure::TYPE_NAME,
|
||||
})?;
|
||||
|
||||
Ok(Self {
|
||||
table_metadata_manager,
|
||||
procedure_manager,
|
||||
|
||||
@@ -28,7 +28,7 @@ use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocato
|
||||
use common_meta::ddl::{
|
||||
DdlContext, NoopRegionFailureDetectorControl, RegionFailureDetectorControllerRef,
|
||||
};
|
||||
use common_meta::ddl_manager::{DdlManager, DdlManagerConfiguratorRef, DdlManagerConfigureContext};
|
||||
use common_meta::ddl_manager::{DdlManager, DdlManagerConfiguratorRef};
|
||||
use common_meta::distributed_time_constants::{self};
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
@@ -405,10 +405,11 @@ impl MetasrvBuilder {
|
||||
|
||||
let ddl_manager = if let Some(configurator) = plugins
|
||||
.as_ref()
|
||||
.and_then(|p| p.get::<DdlManagerConfiguratorRef>())
|
||||
.and_then(|p| p.get::<DdlManagerConfiguratorRef<DdlManagerConfigureContext>>())
|
||||
{
|
||||
let ctx = DdlManagerConfigureContext {
|
||||
kv_backend: kv_backend.clone(),
|
||||
meta_peer_client: meta_peer_client.clone(),
|
||||
};
|
||||
configurator
|
||||
.configure(ddl_manager, ctx)
|
||||
@@ -637,3 +638,9 @@ impl Default for MetasrvBuilder {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// The context for [`DdlManagerConfiguratorRef`].
|
||||
pub struct DdlManagerConfigureContext {
|
||||
pub kv_backend: KvBackendRef,
|
||||
pub meta_peer_client: MetaPeerClientRef,
|
||||
}
|
||||
|
||||
@@ -13,11 +13,16 @@
|
||||
// limitations under the License.
|
||||
|
||||
pub(crate) mod repartition_start;
|
||||
pub(crate) mod update_metadata;
|
||||
|
||||
use std::any::Any;
|
||||
use std::fmt::Debug;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::DatanodeId;
|
||||
use common_meta::cache_invalidator::CacheInvalidatorRef;
|
||||
use common_meta::instruction::CacheIdent;
|
||||
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue, RegionInfo};
|
||||
use common_meta::key::table_route::TableRouteValue;
|
||||
use common_meta::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
|
||||
use common_meta::rpc::router::RegionRoute;
|
||||
@@ -37,6 +42,8 @@ pub struct RepartitionGroupProcedure {}
|
||||
pub struct Context {
|
||||
pub persistent_ctx: PersistentContext,
|
||||
|
||||
pub cache_invalidator: CacheInvalidatorRef,
|
||||
|
||||
pub table_metadata_manager: TableMetadataManagerRef,
|
||||
}
|
||||
|
||||
@@ -45,6 +52,7 @@ pub struct GroupPrepareResult {
|
||||
pub source_routes: Vec<RegionRoute>,
|
||||
pub target_routes: Vec<RegionRoute>,
|
||||
pub central_region: RegionId,
|
||||
pub central_region_datanode_id: DatanodeId,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
@@ -91,6 +99,109 @@ impl Context {
|
||||
|
||||
Ok(table_route_value)
|
||||
}
|
||||
|
||||
/// Returns the `datanode_table_value`
|
||||
///
|
||||
/// Retry:
|
||||
/// - Failed to retrieve the metadata of datanode table.
|
||||
pub async fn get_datanode_table_value(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
datanode_id: u64,
|
||||
) -> Result<DatanodeTableValue> {
|
||||
let datanode_table_value = self
|
||||
.table_metadata_manager
|
||||
.datanode_table_manager()
|
||||
.get(&DatanodeTableKey {
|
||||
datanode_id,
|
||||
table_id,
|
||||
})
|
||||
.await
|
||||
.context(error::TableMetadataManagerSnafu)
|
||||
.map_err(BoxedError::new)
|
||||
.with_context(|_| error::RetryLaterWithSourceSnafu {
|
||||
reason: format!("Failed to get DatanodeTable: {table_id}"),
|
||||
})?
|
||||
.context(error::DatanodeTableNotFoundSnafu {
|
||||
table_id,
|
||||
datanode_id,
|
||||
})?;
|
||||
Ok(datanode_table_value)
|
||||
}
|
||||
|
||||
/// Broadcasts the invalidate table cache message.
|
||||
pub async fn invalidate_table_cache(&self) -> Result<()> {
|
||||
let table_id = self.persistent_ctx.table_id;
|
||||
let group_id = self.persistent_ctx.group_id;
|
||||
let subject = format!(
|
||||
"Invalidate table cache for repartition table, group: {}, table: {}",
|
||||
group_id, table_id,
|
||||
);
|
||||
let ctx = common_meta::cache_invalidator::Context {
|
||||
subject: Some(subject),
|
||||
};
|
||||
let _ = self
|
||||
.cache_invalidator
|
||||
.invalidate(&ctx, &[CacheIdent::TableId(table_id)])
|
||||
.await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Updates the table route.
|
||||
///
|
||||
/// Retry:
|
||||
/// - Failed to retrieve the metadata of datanode table.
|
||||
///
|
||||
/// Abort:
|
||||
/// - Table route not found.
|
||||
/// - Failed to update the table route.
|
||||
pub async fn update_table_route(
|
||||
&self,
|
||||
current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
|
||||
new_region_routes: Vec<RegionRoute>,
|
||||
) -> Result<()> {
|
||||
let table_id = self.persistent_ctx.table_id;
|
||||
// Safety: prepare result is set in [RepartitionStart] state.
|
||||
let prepare_result = self.persistent_ctx.group_prepare_result.as_ref().unwrap();
|
||||
let central_region_datanode_table_value = self
|
||||
.get_datanode_table_value(table_id, prepare_result.central_region_datanode_id)
|
||||
.await?;
|
||||
let RegionInfo {
|
||||
region_options,
|
||||
region_wal_options,
|
||||
..
|
||||
} = ¢ral_region_datanode_table_value.region_info;
|
||||
|
||||
self.table_metadata_manager
|
||||
.update_table_route(
|
||||
table_id,
|
||||
central_region_datanode_table_value.region_info.clone(),
|
||||
current_table_route_value,
|
||||
new_region_routes,
|
||||
region_options,
|
||||
region_wal_options,
|
||||
)
|
||||
.await
|
||||
.context(error::TableMetadataManagerSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the region routes of the given table route value.
|
||||
///
|
||||
/// Abort:
|
||||
/// - Table route value is not physical.
|
||||
pub fn region_routes(
|
||||
table_id: TableId,
|
||||
table_route_value: &TableRouteValue,
|
||||
) -> Result<&Vec<RegionRoute>> {
|
||||
table_route_value
|
||||
.region_routes()
|
||||
.with_context(|_| error::UnexpectedLogicalRouteTableSnafu {
|
||||
err_msg: format!(
|
||||
"TableRoute({:?}) is a non-physical TableRouteValue.",
|
||||
table_id
|
||||
),
|
||||
})
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -151,4 +262,23 @@ mod tests {
|
||||
let err = ctx.get_table_route_value().await.unwrap_err();
|
||||
assert!(err.is_retryable());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_datanode_table_value_retry_error() {
|
||||
let kv = MockKvBackendBuilder::default()
|
||||
.range_fn(Arc::new(|_| {
|
||||
common_meta::error::UnexpectedSnafu {
|
||||
err_msg: "mock err",
|
||||
}
|
||||
.fail()
|
||||
}))
|
||||
.build()
|
||||
.unwrap();
|
||||
let mut env = TestingEnv::new();
|
||||
env.table_metadata_manager = Arc::new(TableMetadataManager::new(Arc::new(kv)));
|
||||
let persistent_context = new_persistent_context(1024, vec![], vec![]);
|
||||
let ctx = env.create_context(persistent_context);
|
||||
let err = ctx.get_datanode_table_value(1024, 1).await.unwrap_err();
|
||||
assert!(err.is_retryable());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,9 @@ use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt, ensure};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::procedure::repartition::group::{Context, GroupId, GroupPrepareResult, State};
|
||||
use crate::procedure::repartition::group::{
|
||||
Context, GroupId, GroupPrepareResult, State, region_routes,
|
||||
};
|
||||
use crate::procedure::repartition::plan::RegionDescriptor;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
@@ -67,7 +69,6 @@ impl RepartitionStart {
|
||||
}
|
||||
);
|
||||
|
||||
let central_region = sources[0].region_id;
|
||||
let region_routes_map = region_routes
|
||||
.iter()
|
||||
.map(|r| (r.region.id, r))
|
||||
@@ -93,14 +94,26 @@ impl RepartitionStart {
|
||||
group_id,
|
||||
region_id: t.region_id,
|
||||
})
|
||||
.and_then(|r| ensure_region_route_expr_match(r, t))
|
||||
.map(|r| (*r).clone())
|
||||
})
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
let central_region = sources[0].region_id;
|
||||
let central_region_datanode_id = source_region_routes[0]
|
||||
.leader_peer
|
||||
.as_ref()
|
||||
.context(error::UnexpectedSnafu {
|
||||
violated: format!(
|
||||
"Leader peer is not set for central region: {}",
|
||||
central_region
|
||||
),
|
||||
})?
|
||||
.id;
|
||||
|
||||
Ok(GroupPrepareResult {
|
||||
source_routes: source_region_routes,
|
||||
target_routes: target_region_routes,
|
||||
central_region,
|
||||
central_region_datanode_id,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -135,14 +148,7 @@ impl State for RepartitionStart {
|
||||
let table_id = ctx.persistent_ctx.table_id;
|
||||
let group_id = ctx.persistent_ctx.group_id;
|
||||
let table_route_value = ctx.get_table_route_value().await?.into_inner();
|
||||
let region_routes = table_route_value.region_routes().with_context(|_| {
|
||||
error::UnexpectedLogicalRouteTableSnafu {
|
||||
err_msg: format!(
|
||||
"TableRoute({:?}) is a non-physical TableRouteValue.",
|
||||
table_id
|
||||
),
|
||||
}
|
||||
})?;
|
||||
let region_routes = region_routes(table_id, &table_route_value)?;
|
||||
let group_prepare_result = Self::ensure_route_present(
|
||||
group_id,
|
||||
region_routes,
|
||||
@@ -234,43 +240,6 @@ mod tests {
|
||||
)
|
||||
.unwrap_err();
|
||||
assert_matches!(err, Error::PartitionExprMismatch { .. });
|
||||
|
||||
let source_region = RegionDescriptor {
|
||||
region_id: RegionId::new(1024, 1),
|
||||
partition_expr: range_expr("x", 0, 100),
|
||||
};
|
||||
let target_region = RegionDescriptor {
|
||||
region_id: RegionId::new(1024, 2),
|
||||
partition_expr: range_expr("x", 0, 10),
|
||||
};
|
||||
let region_routes = vec![
|
||||
RegionRoute {
|
||||
region: Region {
|
||||
id: RegionId::new(1024, 1),
|
||||
partition_expr: range_expr("x", 0, 100).as_json_str().unwrap(),
|
||||
..Default::default()
|
||||
},
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
..Default::default()
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region {
|
||||
id: RegionId::new(1024, 2),
|
||||
partition_expr: range_expr("x", 0, 5).as_json_str().unwrap(),
|
||||
..Default::default()
|
||||
},
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
..Default::default()
|
||||
},
|
||||
];
|
||||
let err = RepartitionStart::ensure_route_present(
|
||||
Uuid::new_v4(),
|
||||
®ion_routes,
|
||||
&[source_region],
|
||||
&[target_region],
|
||||
)
|
||||
.unwrap_err();
|
||||
assert_matches!(err, Error::PartitionExprMismatch { .. });
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -0,0 +1,80 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub(crate) mod apply_staging_region;
|
||||
pub(crate) mod rollback_staging_region;
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_procedure::{Context as ProcedureContext, Status};
|
||||
use common_telemetry::warn;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::procedure::repartition::group::repartition_start::RepartitionStart;
|
||||
use crate::procedure::repartition::group::{Context, State};
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub enum UpdateMetadata {
|
||||
/// Applies the new partition expressions for staging regions.
|
||||
ApplyStaging,
|
||||
/// Rolls back the new partition expressions for staging regions.
|
||||
RollbackStaging,
|
||||
}
|
||||
|
||||
impl UpdateMetadata {
|
||||
#[allow(dead_code)]
|
||||
fn next_state() -> (Box<dyn State>, Status) {
|
||||
// TODO(weny): change it later.
|
||||
(Box::new(RepartitionStart), Status::executing(true))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
#[typetag::serde]
|
||||
impl State for UpdateMetadata {
|
||||
async fn next(
|
||||
&mut self,
|
||||
ctx: &mut Context,
|
||||
_procedure_ctx: &ProcedureContext,
|
||||
) -> Result<(Box<dyn State>, Status)> {
|
||||
match self {
|
||||
UpdateMetadata::ApplyStaging => {
|
||||
// TODO(weny): If all metadata have already been updated, skip applying staging regions.
|
||||
self.apply_staging_regions(ctx).await?;
|
||||
|
||||
if let Err(err) = ctx.invalidate_table_cache().await {
|
||||
warn!(
|
||||
"Failed to broadcast the invalidate table cache message during the apply staging regions, error: {err:?}"
|
||||
);
|
||||
};
|
||||
Ok(Self::next_state())
|
||||
}
|
||||
UpdateMetadata::RollbackStaging => {
|
||||
self.rollback_staging_regions(ctx).await?;
|
||||
|
||||
if let Err(err) = ctx.invalidate_table_cache().await {
|
||||
warn!(
|
||||
"Failed to broadcast the invalidate table cache message during the rollback staging regions, error: {err:?}"
|
||||
);
|
||||
};
|
||||
Ok(Self::next_state())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,181 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::rpc::router::RegionRoute;
|
||||
use common_telemetry::error;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::procedure::repartition::group::update_metadata::UpdateMetadata;
|
||||
use crate::procedure::repartition::group::{Context, GroupId, region_routes};
|
||||
use crate::procedure::repartition::plan::RegionDescriptor;
|
||||
|
||||
impl UpdateMetadata {
|
||||
/// Applies the new partition expressions for staging regions.
|
||||
///
|
||||
/// Abort:
|
||||
/// - Target region not found.
|
||||
/// - Source region not found.
|
||||
fn apply_staging_region_routes(
|
||||
group_id: GroupId,
|
||||
sources: &[RegionDescriptor],
|
||||
targets: &[RegionDescriptor],
|
||||
current_region_routes: &[RegionRoute],
|
||||
) -> Result<Vec<RegionRoute>> {
|
||||
let mut region_routes = current_region_routes.to_vec();
|
||||
let mut region_routes_map = region_routes
|
||||
.iter_mut()
|
||||
.map(|route| (route.region.id, route))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
for target in targets {
|
||||
let region_route = region_routes_map.get_mut(&target.region_id).context(
|
||||
error::RepartitionTargetRegionMissingSnafu {
|
||||
group_id,
|
||||
region_id: target.region_id,
|
||||
},
|
||||
)?;
|
||||
region_route.region.partition_expr = target
|
||||
.partition_expr
|
||||
.as_json_str()
|
||||
.context(error::SerializePartitionExprSnafu)?;
|
||||
region_route.set_leader_staging();
|
||||
}
|
||||
|
||||
for source in sources {
|
||||
let region_route = region_routes_map.get_mut(&source.region_id).context(
|
||||
error::RepartitionSourceRegionMissingSnafu {
|
||||
group_id,
|
||||
region_id: source.region_id,
|
||||
},
|
||||
)?;
|
||||
region_route.set_leader_staging();
|
||||
}
|
||||
|
||||
Ok(region_routes)
|
||||
}
|
||||
|
||||
/// Applies the new partition expressions for staging regions.
|
||||
///
|
||||
/// Abort:
|
||||
/// - Table route is not physical.
|
||||
/// - Target region not found.
|
||||
/// - Source region not found.
|
||||
/// - Failed to update the table route.
|
||||
/// - Central region datanode table value not found.
|
||||
#[allow(dead_code)]
|
||||
pub(crate) async fn apply_staging_regions(&self, ctx: &mut Context) -> Result<()> {
|
||||
let table_id = ctx.persistent_ctx.table_id;
|
||||
let group_id = ctx.persistent_ctx.group_id;
|
||||
let current_table_route_value = ctx.get_table_route_value().await?;
|
||||
let region_routes = region_routes(table_id, current_table_route_value.get_inner_ref())?;
|
||||
let new_region_routes = Self::apply_staging_region_routes(
|
||||
group_id,
|
||||
&ctx.persistent_ctx.sources,
|
||||
&ctx.persistent_ctx.targets,
|
||||
region_routes,
|
||||
)?;
|
||||
|
||||
if let Err(err) = ctx
|
||||
.update_table_route(¤t_table_route_value, new_region_routes)
|
||||
.await
|
||||
{
|
||||
error!(err; "Failed to update the table route during the updating metadata for repartition: {table_id}, group_id: {group_id}");
|
||||
return Err(BoxedError::new(err)).context(error::RetryLaterWithSourceSnafu {
|
||||
reason: format!(
|
||||
"Failed to update the table route during the updating metadata for repartition: {table_id}, group_id: {group_id}"
|
||||
),
|
||||
});
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::rpc::router::{Region, RegionRoute};
|
||||
use store_api::storage::RegionId;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::procedure::repartition::group::update_metadata::UpdateMetadata;
|
||||
use crate::procedure::repartition::plan::RegionDescriptor;
|
||||
use crate::procedure::repartition::test_util::range_expr;
|
||||
|
||||
#[test]
|
||||
fn test_generate_region_routes() {
|
||||
let group_id = Uuid::new_v4();
|
||||
let table_id = 1024;
|
||||
let region_routes = vec![
|
||||
RegionRoute {
|
||||
region: Region {
|
||||
id: RegionId::new(table_id, 1),
|
||||
partition_expr: range_expr("x", 0, 100).as_json_str().unwrap(),
|
||||
..Default::default()
|
||||
},
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
..Default::default()
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region {
|
||||
id: RegionId::new(table_id, 2),
|
||||
partition_expr: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
..Default::default()
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region {
|
||||
id: RegionId::new(table_id, 3),
|
||||
partition_expr: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
..Default::default()
|
||||
},
|
||||
];
|
||||
let source_region = RegionDescriptor {
|
||||
region_id: RegionId::new(table_id, 1),
|
||||
partition_expr: range_expr("x", 0, 100),
|
||||
};
|
||||
let target_region = RegionDescriptor {
|
||||
region_id: RegionId::new(table_id, 2),
|
||||
partition_expr: range_expr("x", 0, 10),
|
||||
};
|
||||
|
||||
let new_region_routes = UpdateMetadata::apply_staging_region_routes(
|
||||
group_id,
|
||||
&[source_region],
|
||||
&[target_region],
|
||||
®ion_routes,
|
||||
)
|
||||
.unwrap();
|
||||
assert!(new_region_routes[0].is_leader_staging());
|
||||
assert_eq!(
|
||||
new_region_routes[0].region.partition_expr,
|
||||
range_expr("x", 0, 100).as_json_str().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
new_region_routes[1].region.partition_expr,
|
||||
range_expr("x", 0, 10).as_json_str().unwrap()
|
||||
);
|
||||
assert!(new_region_routes[1].is_leader_staging());
|
||||
assert!(!new_region_routes[2].is_leader_staging());
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,187 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::rpc::router::RegionRoute;
|
||||
use common_telemetry::error;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::procedure::repartition::group::update_metadata::UpdateMetadata;
|
||||
use crate::procedure::repartition::group::{Context, GroupId, region_routes};
|
||||
|
||||
impl UpdateMetadata {
|
||||
/// Rolls back the staging regions.
|
||||
///
|
||||
/// Abort:
|
||||
/// - Source region not found.
|
||||
/// - Target region not found.
|
||||
#[allow(dead_code)]
|
||||
fn rollback_staging_region_routes(
|
||||
group_id: GroupId,
|
||||
source_routes: &[RegionRoute],
|
||||
target_routes: &[RegionRoute],
|
||||
current_region_routes: &[RegionRoute],
|
||||
) -> Result<Vec<RegionRoute>> {
|
||||
let mut region_routes = current_region_routes.to_vec();
|
||||
let mut region_routes_map = region_routes
|
||||
.iter_mut()
|
||||
.map(|route| (route.region.id, route))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
for source in source_routes {
|
||||
let region_route = region_routes_map.get_mut(&source.region.id).context(
|
||||
error::RepartitionSourceRegionMissingSnafu {
|
||||
group_id,
|
||||
region_id: source.region.id,
|
||||
},
|
||||
)?;
|
||||
region_route.region.partition_expr = source.region.partition_expr.clone();
|
||||
region_route.clear_leader_staging();
|
||||
}
|
||||
|
||||
for target in target_routes {
|
||||
let region_route = region_routes_map.get_mut(&target.region.id).context(
|
||||
error::RepartitionTargetRegionMissingSnafu {
|
||||
group_id,
|
||||
region_id: target.region.id,
|
||||
},
|
||||
)?;
|
||||
region_route.clear_leader_staging();
|
||||
}
|
||||
|
||||
Ok(region_routes)
|
||||
}
|
||||
|
||||
/// Rolls back the metadata for staging regions.
|
||||
///
|
||||
/// Abort:
|
||||
/// - Table route is not physical.
|
||||
/// - Source region not found.
|
||||
/// - Target region not found.
|
||||
/// - Failed to update the table route.
|
||||
/// - Central region datanode table value not found.
|
||||
#[allow(dead_code)]
|
||||
pub(crate) async fn rollback_staging_regions(&self, ctx: &mut Context) -> Result<()> {
|
||||
let table_id = ctx.persistent_ctx.table_id;
|
||||
let group_id = ctx.persistent_ctx.group_id;
|
||||
let current_table_route_value = ctx.get_table_route_value().await?;
|
||||
let region_routes = region_routes(table_id, current_table_route_value.get_inner_ref())?;
|
||||
// Safety: prepare result is set in [RepartitionStart] state.
|
||||
let prepare_result = ctx.persistent_ctx.group_prepare_result.as_ref().unwrap();
|
||||
let new_region_routes = Self::rollback_staging_region_routes(
|
||||
group_id,
|
||||
&prepare_result.source_routes,
|
||||
&prepare_result.target_routes,
|
||||
region_routes,
|
||||
)?;
|
||||
|
||||
if let Err(err) = ctx
|
||||
.update_table_route(¤t_table_route_value, new_region_routes)
|
||||
.await
|
||||
{
|
||||
error!(err; "Failed to update the table route during the updating metadata for repartition: {table_id}, group_id: {group_id}");
|
||||
return Err(BoxedError::new(err)).context(error::RetryLaterWithSourceSnafu {
|
||||
reason: format!(
|
||||
"Failed to update the table route during the updating metadata for repartition: {table_id}, group_id: {group_id}"
|
||||
),
|
||||
});
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::rpc::router::{LeaderState, Region, RegionRoute};
|
||||
use store_api::storage::RegionId;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::procedure::repartition::group::update_metadata::UpdateMetadata;
|
||||
use crate::procedure::repartition::test_util::range_expr;
|
||||
|
||||
#[test]
|
||||
fn test_rollback_staging_region_routes() {
|
||||
let group_id = Uuid::new_v4();
|
||||
let table_id = 1024;
|
||||
let region_routes = vec![
|
||||
RegionRoute {
|
||||
region: Region {
|
||||
id: RegionId::new(table_id, 1),
|
||||
partition_expr: range_expr("x", 0, 100).as_json_str().unwrap(),
|
||||
..Default::default()
|
||||
},
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
leader_state: Some(LeaderState::Staging),
|
||||
..Default::default()
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region {
|
||||
id: RegionId::new(table_id, 2),
|
||||
partition_expr: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
leader_state: Some(LeaderState::Staging),
|
||||
..Default::default()
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region {
|
||||
id: RegionId::new(table_id, 3),
|
||||
partition_expr: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
leader_state: Some(LeaderState::Downgrading),
|
||||
..Default::default()
|
||||
},
|
||||
];
|
||||
let source_routes = vec![RegionRoute {
|
||||
region: Region {
|
||||
id: RegionId::new(table_id, 1),
|
||||
partition_expr: range_expr("x", 0, 20).as_json_str().unwrap(),
|
||||
..Default::default()
|
||||
},
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
..Default::default()
|
||||
}];
|
||||
let target_routes = vec![RegionRoute {
|
||||
region: Region {
|
||||
id: RegionId::new(table_id, 2),
|
||||
partition_expr: range_expr("x", 0, 20).as_json_str().unwrap(),
|
||||
..Default::default()
|
||||
},
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
..Default::default()
|
||||
}];
|
||||
let new_region_routes = UpdateMetadata::rollback_staging_region_routes(
|
||||
group_id,
|
||||
&source_routes,
|
||||
&target_routes,
|
||||
®ion_routes,
|
||||
)
|
||||
.unwrap();
|
||||
assert!(!new_region_routes[0].is_leader_staging());
|
||||
assert_eq!(
|
||||
new_region_routes[0].region.partition_expr,
|
||||
range_expr("x", 0, 20).as_json_str().unwrap(),
|
||||
);
|
||||
assert!(!new_region_routes[1].is_leader_staging());
|
||||
assert!(new_region_routes[2].is_leader_downgrading());
|
||||
}
|
||||
}
|
||||
@@ -21,6 +21,6 @@ use store_api::storage::RegionId;
pub struct RegionDescriptor {
/// The region id of the region involved in the plan.
pub region_id: RegionId,
/// The partition expression of the region.
/// The new partition expression of the region.
pub partition_expr: PartitionExpr,
}

@@ -16,17 +16,22 @@ use std::sync::Arc;
|
||||
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
use common_meta::sequence::SequenceBuilder;
|
||||
use datatypes::value::Value;
|
||||
use partition::expr::{PartitionExpr, col};
|
||||
use store_api::storage::TableId;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::cache_invalidator::MetasrvCacheInvalidator;
|
||||
use crate::metasrv::MetasrvInfo;
|
||||
use crate::procedure::repartition::group::{Context, PersistentContext};
|
||||
use crate::procedure::repartition::plan::RegionDescriptor;
|
||||
use crate::procedure::test_util::MailboxContext;
|
||||
|
||||
/// `TestingEnv` provides components during the tests.
|
||||
pub struct TestingEnv {
|
||||
pub table_metadata_manager: TableMetadataManagerRef,
|
||||
pub mailbox_ctx: MailboxContext,
|
||||
}
|
||||
|
||||
impl Default for TestingEnv {
|
||||
@@ -39,16 +44,28 @@ impl TestingEnv {
|
||||
pub fn new() -> Self {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::new());
|
||||
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
|
||||
let mailbox_sequence =
|
||||
SequenceBuilder::new("test_heartbeat_mailbox", kv_backend.clone()).build();
|
||||
let mailbox_ctx = MailboxContext::new(mailbox_sequence);
|
||||
|
||||
Self {
|
||||
table_metadata_manager,
|
||||
mailbox_ctx,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_context(self, persistent_context: PersistentContext) -> Context {
|
||||
let cache_invalidator = Arc::new(MetasrvCacheInvalidator::new(
|
||||
self.mailbox_ctx.mailbox().clone(),
|
||||
MetasrvInfo {
|
||||
server_addr: String::new(),
|
||||
},
|
||||
));
|
||||
|
||||
Context {
|
||||
persistent_ctx: persistent_context,
|
||||
table_metadata_manager: self.table_metadata_manager.clone(),
|
||||
cache_invalidator,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@ use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::procedure_executor::ProcedureExecutorRef;
|
||||
use common_query::Output;
|
||||
use common_telemetry::{debug, tracing};
|
||||
use common_telemetry::{debug, tracing, warn};
|
||||
use common_time::Timestamp;
|
||||
use common_time::range::TimestampRange;
|
||||
use datafusion_expr::LogicalPlan;
|
||||
@@ -488,6 +488,11 @@ impl StatementExecutor {
|
||||
"@@SESSION.MAX_EXECUTION_TIME" | "MAX_EXECUTION_TIME" => match query_ctx.channel() {
|
||||
Channel::Mysql => set_query_timeout(set_var.value, query_ctx)?,
|
||||
Channel::Postgres => {
|
||||
warn!(
|
||||
"Unsupported set variable {} for channel {:?}",
|
||||
var_name,
|
||||
query_ctx.channel()
|
||||
);
|
||||
query_ctx.set_warning(format!("Unsupported set variable {}", var_name))
|
||||
}
|
||||
_ => {
|
||||
@@ -497,16 +502,23 @@ impl StatementExecutor {
|
||||
.fail();
|
||||
}
|
||||
},
|
||||
"STATEMENT_TIMEOUT" => {
|
||||
if query_ctx.channel() == Channel::Postgres {
|
||||
set_query_timeout(set_var.value, query_ctx)?
|
||||
} else {
|
||||
"STATEMENT_TIMEOUT" => match query_ctx.channel() {
|
||||
Channel::Postgres => set_query_timeout(set_var.value, query_ctx)?,
|
||||
Channel::Mysql => {
|
||||
warn!(
|
||||
"Unsupported set variable {} for channel {:?}",
|
||||
var_name,
|
||||
query_ctx.channel()
|
||||
);
|
||||
query_ctx.set_warning(format!("Unsupported set variable {}", var_name));
|
||||
}
|
||||
_ => {
|
||||
return NotSupportedSnafu {
|
||||
feat: format!("Unsupported set variable {}", var_name),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
}
|
||||
},
|
||||
"SEARCH_PATH" => {
|
||||
if query_ctx.channel() == Channel::Postgres {
|
||||
set_search_path(set_var.value, query_ctx)?
|
||||
@@ -518,14 +530,16 @@ impl StatementExecutor {
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
// for postgres, we give unknown SET statements a warning with
|
||||
// success, this is prevent the SET call becoming a blocker
|
||||
// of connection establishment
|
||||
//
|
||||
if query_ctx.channel() == Channel::Postgres {
|
||||
query_ctx.set_warning(format!("Unsupported set variable {}", var_name));
|
||||
} else if query_ctx.channel() == Channel::Mysql && var_name.starts_with("@@") {
|
||||
// Just ignore `SET @@` commands for MySQL
|
||||
if query_ctx.channel() == Channel::Postgres || query_ctx.channel() == Channel::Mysql
|
||||
{
|
||||
// For unknown SET statements, we give a warning with success.
|
||||
// This prevents the SET call from becoming a blocker of MySQL/Postgres clients'
|
||||
// connection establishment.
|
||||
warn!(
|
||||
"Unsupported set variable {} for channel {:?}",
|
||||
var_name,
|
||||
query_ctx.channel()
|
||||
);
|
||||
query_ctx.set_warning(format!("Unsupported set variable {}", var_name));
|
||||
} else {
|
||||
return NotSupportedSnafu {
|
||||
|
||||
@@ -9,6 +9,7 @@ workspace = true
|
||||
|
||||
[dependencies]
|
||||
auth.workspace = true
|
||||
catalog.workspace = true
|
||||
clap.workspace = true
|
||||
cli.workspace = true
|
||||
common-base.workspace = true
|
||||
@@ -17,6 +18,7 @@ common-meta.workspace = true
|
||||
datanode.workspace = true
|
||||
flow.workspace = true
|
||||
frontend.workspace = true
|
||||
meta-client.workspace = true
|
||||
meta-srv.workspace = true
|
||||
serde.workspace = true
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -30,3 +30,20 @@ pub async fn setup_flownode_plugins(
|
||||
pub async fn start_flownode_plugins(_plugins: Plugins) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub mod context {
|
||||
use std::sync::Arc;
|
||||
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_meta::FlownodeId;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use flow::FrontendClient;
|
||||
|
||||
/// The context for `GrpcBuilderConfiguratorRef` in flownode.
|
||||
pub struct GrpcConfigureContext {
|
||||
pub kv_backend: KvBackendRef,
|
||||
pub fe_client: Arc<FrontendClient>,
|
||||
pub flownode_id: FlownodeId,
|
||||
pub catalog_manager: CatalogManagerRef,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,3 +40,25 @@ pub async fn setup_frontend_plugins(
|
||||
pub async fn start_frontend_plugins(_plugins: Plugins) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub mod context {
|
||||
use std::sync::Arc;
|
||||
|
||||
use flow::FrontendClient;
|
||||
use meta_client::MetaClientRef;
|
||||
|
||||
/// The context for [`catalog::kvbackend::CatalogManagerConfiguratorRef`] in standalone or
|
||||
/// distributed.
|
||||
pub enum CatalogManagerConfigureContext {
|
||||
Distributed(DistributedCatalogManagerConfigureContext),
|
||||
Standalone(StandaloneCatalogManagerConfigureContext),
|
||||
}
|
||||
|
||||
pub struct DistributedCatalogManagerConfigureContext {
|
||||
pub meta_client: MetaClientRef,
|
||||
}
|
||||
|
||||
pub struct StandaloneCatalogManagerConfigureContext {
|
||||
pub fe_client: Arc<FrontendClient>,
|
||||
}
|
||||
}
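A small hedged sketch of how a catalog manager configurator might branch on this context enum; the function and the arm bodies are placeholders invented for illustration.

fn describe(ctx: &CatalogManagerConfigureContext) -> &'static str {
    match ctx {
        // In distributed mode the meta client is available.
        CatalogManagerConfigureContext::Distributed(_) => "configure using the meta client",
        // In standalone mode a frontend client is handed in instead.
        CatalogManagerConfigureContext::Standalone(_) => "configure using the frontend client",
    }
}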
|
||||
|
||||
@@ -13,12 +13,12 @@
|
||||
// limitations under the License.
|
||||
|
||||
mod cli;
|
||||
mod datanode;
|
||||
mod flownode;
|
||||
mod frontend;
|
||||
pub mod datanode;
|
||||
pub mod flownode;
|
||||
pub mod frontend;
|
||||
mod meta_srv;
|
||||
mod options;
|
||||
mod standalone;
|
||||
pub mod standalone;
|
||||
|
||||
pub use cli::SubCommand;
|
||||
pub use datanode::{setup_datanode_plugins, start_datanode_plugins};
|
||||
|
||||
@@ -33,3 +33,18 @@ pub async fn setup_standalone_plugins(
|
||||
pub async fn start_standalone_plugins(_plugins: Plugins) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub mod context {
|
||||
use std::sync::Arc;
|
||||
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use flow::FrontendClient;
|
||||
|
||||
/// The context for [`common_meta::ddl_manager::DdlManagerConfiguratorRef`] in standalone.
|
||||
pub struct DdlManagerConfigureContext {
|
||||
pub kv_backend: KvBackendRef,
|
||||
pub fe_client: Arc<FrontendClient>,
|
||||
pub catalog_manager: CatalogManagerRef,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
mod show_create_table;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::ops::ControlFlow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use catalog::CatalogManagerRef;
|
||||
@@ -52,7 +53,7 @@ use regex::Regex;
|
||||
use session::context::{Channel, QueryContextRef};
|
||||
pub use show_create_table::create_table_stmt;
|
||||
use snafu::{OptionExt, ResultExt, ensure};
|
||||
use sql::ast::Ident;
|
||||
use sql::ast::{Ident, visit_expressions_mut};
|
||||
use sql::parser::ParserContext;
|
||||
use sql::statements::OptionMap;
|
||||
use sql::statements::create::{CreateDatabase, CreateFlow, CreateView, Partitions, SqlOrTql};
|
||||
@@ -73,7 +74,6 @@ use crate::planner::DfLogicalPlanner;
|
||||
|
||||
const SCHEMAS_COLUMN: &str = "Database";
|
||||
const OPTIONS_COLUMN: &str = "Options";
|
||||
const TABLES_COLUMN: &str = "Tables";
|
||||
const VIEWS_COLUMN: &str = "Views";
|
||||
const FLOWS_COLUMN: &str = "Flows";
|
||||
const FIELD_COLUMN: &str = "Field";
|
||||
@@ -210,6 +210,29 @@ pub async fn show_databases(
|
||||
.await
|
||||
}
|
||||
|
||||
/// Replaces column identifier references in a SQL expression.
|
||||
/// Used for backward compatibility where old column names should work with new ones.
|
||||
fn replace_column_in_expr(expr: &mut sqlparser::ast::Expr, from_column: &str, to_column: &str) {
|
||||
let _ = visit_expressions_mut(expr, |e| {
|
||||
match e {
|
||||
sqlparser::ast::Expr::Identifier(ident) => {
|
||||
if ident.value.eq_ignore_ascii_case(from_column) {
|
||||
ident.value = to_column.to_string();
|
||||
}
|
||||
}
|
||||
sqlparser::ast::Expr::CompoundIdentifier(idents) => {
|
||||
if let Some(last) = idents.last_mut()
|
||||
&& last.value.eq_ignore_ascii_case(from_column)
|
||||
{
|
||||
last.value = to_column.to_string();
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
ControlFlow::<()>::Continue(())
|
||||
});
|
||||
}
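A hedged usage sketch for the helper above; the expression is constructed directly rather than parsed, and the "Tables_in_public" name is a hypothetical schema-qualified column.

let mut expr = sqlparser::ast::Expr::Identifier(sqlparser::ast::Ident::new("Tables"));
replace_column_in_expr(&mut expr, "Tables", "Tables_in_public");
// The identifier is now `Tables_in_public`, so an old-style
// `SHOW TABLES WHERE Tables = 't'` filter keeps working after the column rename below.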
|
||||
|
||||
/// Cast a `show` statement execution into a query from tables in `information_schema`.
|
||||
/// - `table_name`: the table name in `information_schema`,
|
||||
/// - `projects`: query projection, a list of `(column, renamed_column)`,
|
||||
@@ -540,15 +563,15 @@ pub async fn show_tables(
|
||||
query_ctx.current_schema()
|
||||
};
|
||||
|
||||
// (dennis): MySQL rename `table_name` to `Tables_in_{schema}`, but we use `Tables` instead.
|
||||
// I don't want to modify this currently, our dashboard may depend on it.
|
||||
// MySQL renames `table_name` to `Tables_in_{schema}` for protocol compatibility
|
||||
let tables_column = format!("Tables_in_{}", schema_name);
|
||||
let projects = if stmt.full {
|
||||
vec![
|
||||
(tables::TABLE_NAME, TABLES_COLUMN),
|
||||
(tables::TABLE_NAME, tables_column.as_str()),
|
||||
(tables::TABLE_TYPE, TABLE_TYPE_COLUMN),
|
||||
]
|
||||
} else {
|
||||
vec![(tables::TABLE_NAME, TABLES_COLUMN)]
|
||||
vec![(tables::TABLE_NAME, tables_column.as_str())]
|
||||
};
|
||||
let filters = vec![
|
||||
col(tables::TABLE_SCHEMA).eq(lit(schema_name.clone())),
|
||||
@@ -557,6 +580,16 @@ pub async fn show_tables(
|
||||
let like_field = Some(tables::TABLE_NAME);
|
||||
let sort = vec![col(tables::TABLE_NAME).sort(true, true)];
|
||||
|
||||
// Transform the WHERE clause for backward compatibility:
|
||||
// Replace "Tables" with "Tables_in_{schema}" to support old queries
|
||||
let kind = match stmt.kind {
|
||||
ShowKind::Where(mut filter) => {
|
||||
replace_column_in_expr(&mut filter, "Tables", &tables_column);
|
||||
ShowKind::Where(filter)
|
||||
}
|
||||
other => other,
|
||||
};
|
||||
|
||||
query_from_information_schema_table(
|
||||
query_engine,
|
||||
catalog_manager,
|
||||
@@ -567,7 +600,7 @@ pub async fn show_tables(
|
||||
filters,
|
||||
like_field,
|
||||
sort,
|
||||
stmt.kind,
|
||||
kind,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -37,6 +37,8 @@ static SHOW_LOWER_CASE_PATTERN: Lazy<Regex> =
|
||||
Lazy::new(|| Regex::new("(?i)^(SHOW VARIABLES LIKE 'lower_case_table_names'(.*))").unwrap());
|
||||
static SHOW_VARIABLES_LIKE_PATTERN: Lazy<Regex> =
|
||||
Lazy::new(|| Regex::new("(?i)^(SHOW VARIABLES( LIKE (.*))?)").unwrap());
|
||||
static SHOW_WARNINGS_PATTERN: Lazy<Regex> =
|
||||
Lazy::new(|| Regex::new("(?i)^(/\\* ApplicationName=.*)?SHOW WARNINGS").unwrap());
|
||||
|
||||
// SELECT TIMEDIFF(NOW(), UTC_TIMESTAMP());
|
||||
static SELECT_TIME_DIFF_FUNC_PATTERN: Lazy<Regex> =
|
||||
@@ -85,8 +87,6 @@ static OTHER_NOT_SUPPORTED_STMT: Lazy<RegexSet> = Lazy::new(|| {
|
||||
"(?i)^(/\\*!40101 SET(.*) \\*/)$",
|
||||
|
||||
// DBeaver.
|
||||
"(?i)^(SHOW WARNINGS)",
|
||||
"(?i)^(/\\* ApplicationName=(.*)SHOW WARNINGS)",
|
||||
"(?i)^(/\\* ApplicationName=(.*)SHOW PLUGINS)",
|
||||
"(?i)^(/\\* ApplicationName=(.*)SHOW ENGINES)",
|
||||
"(?i)^(/\\* ApplicationName=(.*)SELECT @@(.*))",
|
||||
@@ -252,6 +252,47 @@ fn check_show_variables(query: &str) -> Option<Output> {
|
||||
recordbatches.map(Output::new_with_record_batches)
|
||||
}
|
||||
|
||||
/// Build SHOW WARNINGS result from session's warnings
|
||||
fn show_warnings(session: &SessionRef) -> RecordBatches {
|
||||
let schema = Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new("Level", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("Code", ConcreteDataType::uint16_datatype(), false),
|
||||
ColumnSchema::new("Message", ConcreteDataType::string_datatype(), false),
|
||||
]));
|
||||
|
||||
let warnings = session.warnings();
|
||||
let count = warnings.len();
|
||||
|
||||
let columns = if count > 0 {
|
||||
vec![
|
||||
Arc::new(StringVector::from(vec!["Warning"; count])) as _,
|
||||
Arc::new(datatypes::vectors::UInt16Vector::from(vec![
|
||||
Some(1000u16);
|
||||
count
|
||||
])) as _,
|
||||
Arc::new(StringVector::from(warnings)) as _,
|
||||
]
|
||||
} else {
|
||||
vec![
|
||||
Arc::new(StringVector::from(Vec::<String>::new())) as _,
|
||||
Arc::new(datatypes::vectors::UInt16Vector::from(
|
||||
Vec::<Option<u16>>::new(),
|
||||
)) as _,
|
||||
Arc::new(StringVector::from(Vec::<String>::new())) as _,
|
||||
]
|
||||
};
|
||||
|
||||
RecordBatches::try_from_columns(schema, columns).unwrap()
|
||||
}
|
||||
|
||||
fn check_show_warnings(query: &str, session: &SessionRef) -> Option<Output> {
|
||||
if SHOW_WARNINGS_PATTERN.is_match(query) {
|
||||
Some(Output::new_with_record_batches(show_warnings(session)))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
// Check for SET or others query, this is the final check of the federated query.
|
||||
fn check_others(query: &str, _query_ctx: QueryContextRef) -> Option<Output> {
|
||||
if OTHER_NOT_SUPPORTED_STMT.is_match(query.as_bytes()) {
|
||||
@@ -274,7 +315,7 @@ fn check_others(query: &str, _query_ctx: QueryContextRef) -> Option<Output> {
|
||||
pub(crate) fn check(
|
||||
query: &str,
|
||||
query_ctx: QueryContextRef,
|
||||
_session: SessionRef,
|
||||
session: SessionRef,
|
||||
) -> Option<Output> {
|
||||
// INSERT doesn't need the MySQL federated check. We assume the query doesn't contain
// federated or driver setup commands if it starts with an 'INSERT' statement.
|
||||
@@ -287,8 +328,8 @@ pub(crate) fn check(
|
||||
|
||||
// First to check the query is like "select @@variables".
|
||||
check_select_variable(query, query_ctx.clone())
|
||||
// Then to check "show variables like ...".
|
||||
.or_else(|| check_show_variables(query))
|
||||
.or_else(|| check_show_warnings(query, &session))
|
||||
// Last check
|
||||
.or_else(|| check_others(query, query_ctx))
|
||||
}
|
||||
@@ -392,4 +433,64 @@ mod test {
|
||||
+----------------------------------+";
|
||||
test(query, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_show_warnings() {
|
||||
// Test SHOW WARNINGS with no warnings
|
||||
let session = Arc::new(Session::new(None, Channel::Mysql, Default::default(), 0));
|
||||
let output = check("SHOW WARNINGS", QueryContext::arc(), session.clone());
|
||||
match output.unwrap().data {
|
||||
OutputData::RecordBatches(r) => {
|
||||
assert_eq!(r.iter().map(|b| b.num_rows()).sum::<usize>(), 0);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
// Test SHOW WARNINGS with a single warning
|
||||
session.add_warning("Test warning message".to_string());
|
||||
let output = check("SHOW WARNINGS", QueryContext::arc(), session.clone());
|
||||
match output.unwrap().data {
|
||||
OutputData::RecordBatches(r) => {
|
||||
let expected = "\
|
||||
+---------+------+----------------------+
|
||||
| Level | Code | Message |
|
||||
+---------+------+----------------------+
|
||||
| Warning | 1000 | Test warning message |
|
||||
+---------+------+----------------------+";
|
||||
assert_eq!(&r.pretty_print().unwrap(), expected);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
// Test SHOW WARNINGS with multiple warnings
|
||||
session.clear_warnings();
|
||||
session.add_warning("First warning".to_string());
|
||||
session.add_warning("Second warning".to_string());
|
||||
let output = check("SHOW WARNINGS", QueryContext::arc(), session.clone());
|
||||
match output.unwrap().data {
|
||||
OutputData::RecordBatches(r) => {
|
||||
let expected = "\
|
||||
+---------+------+----------------+
|
||||
| Level | Code | Message |
|
||||
+---------+------+----------------+
|
||||
| Warning | 1000 | First warning |
|
||||
| Warning | 1000 | Second warning |
|
||||
+---------+------+----------------+";
|
||||
assert_eq!(&r.pretty_print().unwrap(), expected);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
// Test case insensitivity
|
||||
let output = check("show warnings", QueryContext::arc(), session.clone());
|
||||
assert!(output.is_some());
|
||||
|
||||
// Test with DBeaver-style comment prefix
|
||||
let output = check(
|
||||
"/* ApplicationName=DBeaver */SHOW WARNINGS",
|
||||
QueryContext::arc(),
|
||||
session.clone(),
|
||||
);
|
||||
assert!(output.is_some());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -475,6 +475,8 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
|
||||
p: ParamParser<'a>,
|
||||
w: QueryResultWriter<'a, W>,
|
||||
) -> Result<()> {
|
||||
self.session.clear_warnings();
|
||||
|
||||
let query_ctx = self.session.new_query_context();
|
||||
let db = query_ctx.get_db_string();
|
||||
let _timer = crate::metrics::METRIC_MYSQL_QUERY_TIMER
|
||||
@@ -500,7 +502,7 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
|
||||
}
|
||||
};
|
||||
|
||||
writer::write_output(w, query_ctx, outputs).await?;
|
||||
writer::write_output(w, query_ctx, self.session.clone(), outputs).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -525,7 +527,12 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
|
||||
.with_label_values(&[crate::metrics::METRIC_MYSQL_TEXTQUERY, db.as_str()])
|
||||
.start_timer();
|
||||
|
||||
// Clear warnings for non-SHOW WARNINGS queries
|
||||
let query_upcase = query.to_uppercase();
|
||||
if !query_upcase.starts_with("SHOW WARNINGS") {
|
||||
self.session.clear_warnings();
|
||||
}
|
||||
|
||||
if query_upcase.starts_with("PREPARE ") {
|
||||
match ParserContext::parse_mysql_prepare_stmt(query, query_ctx.sql_dialect()) {
|
||||
Ok((stmt_name, stmt)) => {
|
||||
@@ -534,7 +541,8 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
|
||||
match prepare_results {
|
||||
Ok(_) => {
|
||||
let outputs = vec![Ok(Output::new_with_affected_rows(0))];
|
||||
writer::write_output(writer, query_ctx, outputs).await?;
|
||||
writer::write_output(writer, query_ctx, self.session.clone(), outputs)
|
||||
.await?;
|
||||
return Ok(());
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -570,7 +578,8 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
writer::write_output(writer, query_ctx, outputs).await?;
|
||||
writer::write_output(writer, query_ctx, self.session.clone(), outputs).await?;
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -585,7 +594,7 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
|
||||
Ok(stmt_name) => {
|
||||
self.do_close(stmt_name);
|
||||
let outputs = vec![Ok(Output::new_with_affected_rows(0))];
|
||||
writer::write_output(writer, query_ctx, outputs).await?;
|
||||
writer::write_output(writer, query_ctx, self.session.clone(), outputs).await?;
|
||||
return Ok(());
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -598,7 +607,8 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
|
||||
}
|
||||
|
||||
let outputs = self.do_query(query, query_ctx.clone()).await;
|
||||
writer::write_output(writer, query_ctx, outputs).await?;
|
||||
writer::write_output(writer, query_ctx, self.session.clone(), outputs).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -36,6 +36,7 @@ use futures::StreamExt;
|
||||
use opensrv_mysql::{
|
||||
Column, ColumnFlags, ColumnType, ErrorKind, OkResponse, QueryResultWriter, RowWriter,
|
||||
};
|
||||
use session::SessionRef;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::prelude::*;
|
||||
use tokio::io::AsyncWrite;
|
||||
@@ -47,9 +48,18 @@ use crate::metrics::*;
|
||||
pub async fn write_output<W: AsyncWrite + Send + Sync + Unpin>(
|
||||
w: QueryResultWriter<'_, W>,
|
||||
query_context: QueryContextRef,
|
||||
session: SessionRef,
|
||||
outputs: Vec<Result<Output>>,
|
||||
) -> Result<()> {
|
||||
let mut writer = Some(MysqlResultWriter::new(w, query_context.clone()));
|
||||
if let Some(warning) = query_context.warning() {
|
||||
session.add_warning(warning);
|
||||
}
|
||||
|
||||
let mut writer = Some(MysqlResultWriter::new(
|
||||
w,
|
||||
query_context.clone(),
|
||||
session.clone(),
|
||||
));
|
||||
for output in outputs {
|
||||
let result_writer = writer.take().context(error::InternalSnafu {
|
||||
err_msg: "Sending multiple result set is unsupported",
|
||||
@@ -94,16 +104,19 @@ struct QueryResult {
|
||||
pub struct MysqlResultWriter<'a, W: AsyncWrite + Unpin> {
|
||||
writer: QueryResultWriter<'a, W>,
|
||||
query_context: QueryContextRef,
|
||||
session: SessionRef,
|
||||
}
|
||||
|
||||
impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
|
||||
pub fn new(
|
||||
writer: QueryResultWriter<'a, W>,
|
||||
query_context: QueryContextRef,
|
||||
session: SessionRef,
|
||||
) -> MysqlResultWriter<'a, W> {
|
||||
MysqlResultWriter::<'a, W> {
|
||||
writer,
|
||||
query_context,
|
||||
session,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -131,10 +144,12 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
|
||||
Self::write_query_result(query_result, self.writer, self.query_context).await?;
|
||||
}
|
||||
OutputData::AffectedRows(rows) => {
|
||||
let next_writer = Self::write_affected_rows(self.writer, rows).await?;
|
||||
let next_writer =
|
||||
Self::write_affected_rows(self.writer, rows, &self.session).await?;
|
||||
return Ok(Some(MysqlResultWriter::new(
|
||||
next_writer,
|
||||
self.query_context,
|
||||
self.session,
|
||||
)));
|
||||
}
|
||||
},
|
||||
@@ -152,10 +167,14 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
|
||||
async fn write_affected_rows(
|
||||
w: QueryResultWriter<'a, W>,
|
||||
rows: usize,
|
||||
session: &SessionRef,
|
||||
) -> Result<QueryResultWriter<'a, W>> {
|
||||
let warnings = session.warnings_count() as u16;
|
||||
|
||||
let next_writer = w
|
||||
.complete_one(OkResponse {
|
||||
affected_rows: rows as u64,
|
||||
warnings,
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
|
||||
@@ -133,6 +133,8 @@ impl Drop for RequestMemoryGuard {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use tokio::sync::Barrier;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
@@ -188,21 +190,33 @@ mod tests {
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
async fn test_limiter_concurrent() {
|
||||
let limiter = RequestMemoryLimiter::new(1000);
|
||||
let barrier = Arc::new(Barrier::new(11)); // 10 tasks + main
|
||||
let mut handles = vec![];
|
||||
|
||||
// Spawn 10 tasks each trying to acquire 200 bytes
|
||||
for _ in 0..10 {
|
||||
let limiter_clone = limiter.clone();
|
||||
let handle = tokio::spawn(async move { limiter_clone.try_acquire(200) });
|
||||
let barrier_clone = barrier.clone();
|
||||
let handle = tokio::spawn(async move {
|
||||
barrier_clone.wait().await;
|
||||
limiter_clone.try_acquire(200)
|
||||
});
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
// Let all tasks start together
|
||||
barrier.wait().await;
|
||||
|
||||
let mut success_count = 0;
|
||||
let mut fail_count = 0;
|
||||
let mut guards = Vec::new();
|
||||
|
||||
for handle in handles {
|
||||
match handle.await.unwrap() {
|
||||
Ok(Some(_)) => success_count += 1,
|
||||
Ok(Some(guard)) => {
|
||||
success_count += 1;
|
||||
guards.push(guard);
|
||||
}
|
||||
Err(_) => fail_count += 1,
|
||||
Ok(None) => unreachable!(),
|
||||
}
|
||||
@@ -211,5 +225,6 @@ mod tests {
|
||||
// Only 5 tasks should succeed (5 * 200 = 1000)
|
||||
assert_eq!(success_count, 5);
|
||||
assert_eq!(fail_count, 5);
|
||||
drop(guards);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,7 +18,7 @@ pub mod protocol_ctx;
|
||||
pub mod session_config;
|
||||
pub mod table_name;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::collections::{HashMap, VecDeque};
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::Duration;
|
||||
@@ -35,6 +35,9 @@ use derive_more::Debug;
|
||||
|
||||
use crate::context::{Channel, ConnInfo, QueryContextRef};
|
||||
|
||||
/// Maximum number of warnings to store per session (similar to MySQL's max_error_count)
|
||||
const MAX_WARNINGS: usize = 64;
|
||||
|
||||
/// Session for persistent connection such as MySQL, PostgreSQL etc.
|
||||
#[derive(Debug)]
|
||||
pub struct Session {
|
||||
@@ -58,6 +61,8 @@ pub(crate) struct MutableInner {
|
||||
read_preference: ReadPreference,
|
||||
#[debug(skip)]
|
||||
pub(crate) cursors: HashMap<String, Arc<RecordBatchStreamCursor>>,
|
||||
/// Warning messages for MySQL SHOW WARNINGS support
|
||||
warnings: VecDeque<String>,
|
||||
}
|
||||
|
||||
impl Default for MutableInner {
|
||||
@@ -69,6 +74,7 @@ impl Default for MutableInner {
|
||||
query_timeout: None,
|
||||
read_preference: ReadPreference::Leader,
|
||||
cursors: HashMap::with_capacity(0),
|
||||
warnings: VecDeque::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -156,4 +162,35 @@ impl Session {
    pub fn process_id(&self) -> u32 {
        self.process_id
    }

    pub fn warnings_count(&self) -> usize {
        self.mutable_inner.read().unwrap().warnings.len()
    }

    pub fn warnings(&self) -> Vec<String> {
        self.mutable_inner
            .read()
            .unwrap()
            .warnings
            .iter()
            .cloned()
            .collect()
    }

    /// Add a warning message. If the limit is reached, discard the oldest warning.
    pub fn add_warning(&self, warning: String) {
        let mut inner = self.mutable_inner.write().unwrap();
        if inner.warnings.len() >= MAX_WARNINGS {
            inner.warnings.pop_front();
        }
        inner.warnings.push_back(warning);
    }

    pub fn clear_warnings(&self) {
        let mut inner = self.mutable_inner.write().unwrap();
        if inner.warnings.is_empty() {
            return;
        }
        inner.warnings.clear();
    }
}
@@ -54,6 +54,7 @@ log-query = { workspace = true }
|
||||
loki-proto.workspace = true
|
||||
meta-client.workspace = true
|
||||
meta-srv = { workspace = true, features = ["mock"] }
|
||||
mito2.workspace = true
|
||||
moka.workspace = true
|
||||
mysql_async = { version = "0.35", default-features = false, features = [
|
||||
"time",
|
||||
|
||||
@@ -59,8 +59,10 @@ use hyper_util::rt::TokioIo;
|
||||
use meta_client::client::MetaClientBuilder;
|
||||
use meta_srv::cluster::MetaPeerClientRef;
|
||||
use meta_srv::discovery;
|
||||
use meta_srv::gc::GcSchedulerOptions;
|
||||
use meta_srv::metasrv::{Metasrv, MetasrvOptions, SelectorRef};
|
||||
use meta_srv::mocks::MockInfo;
|
||||
use mito2::gc::GcConfig;
|
||||
use object_store::config::ObjectStoreConfig;
|
||||
use rand::Rng;
|
||||
use servers::grpc::GrpcOptions;
|
||||
@@ -103,6 +105,8 @@ pub struct GreptimeDbClusterBuilder {
|
||||
datanodes: Option<u32>,
|
||||
datanode_wal_config: DatanodeWalConfig,
|
||||
metasrv_wal_config: MetasrvWalConfig,
|
||||
datanode_gc_config: GcConfig,
|
||||
metasrv_gc_config: GcSchedulerOptions,
|
||||
shared_home_dir: Option<Arc<TempDir>>,
|
||||
meta_selector: Option<SelectorRef>,
|
||||
}
|
||||
@@ -134,6 +138,8 @@ impl GreptimeDbClusterBuilder {
|
||||
datanodes: None,
|
||||
datanode_wal_config: DatanodeWalConfig::default(),
|
||||
metasrv_wal_config: MetasrvWalConfig::default(),
|
||||
datanode_gc_config: GcConfig::default(),
|
||||
metasrv_gc_config: GcSchedulerOptions::default(),
|
||||
shared_home_dir: None,
|
||||
meta_selector: None,
|
||||
}
|
||||
@@ -169,6 +175,17 @@ impl GreptimeDbClusterBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_datanode_gc_config(mut self, datanode_gc_config: GcConfig) -> Self {
|
||||
self.datanode_gc_config = datanode_gc_config;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn with_metasrv_gc_config(mut self, metasrv_gc_config: GcSchedulerOptions) -> Self {
|
||||
self.metasrv_gc_config = metasrv_gc_config;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_shared_home_dir(mut self, shared_home_dir: Arc<TempDir>) -> Self {
|
||||
self.shared_home_dir = Some(shared_home_dir);
|
||||
@@ -205,6 +222,7 @@ impl GreptimeDbClusterBuilder {
|
||||
server_addr: "127.0.0.1:3002".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
gc: self.metasrv_gc_config.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
@@ -279,6 +297,7 @@ impl GreptimeDbClusterBuilder {
|
||||
vec![],
|
||||
home_dir,
|
||||
self.datanode_wal_config.clone(),
|
||||
self.datanode_gc_config.clone(),
|
||||
)
|
||||
} else {
|
||||
let (opts, guard) = create_tmp_dir_and_datanode_opts(
|
||||
@@ -286,6 +305,7 @@ impl GreptimeDbClusterBuilder {
|
||||
self.store_providers.clone().unwrap_or_default(),
|
||||
&format!("{}-dn-{}", self.cluster_name, datanode_id),
|
||||
self.datanode_wal_config.clone(),
|
||||
self.datanode_gc_config.clone(),
|
||||
);
|
||||
guards.push(guard);
|
||||
|
||||
|
||||
@@ -309,6 +309,7 @@ impl GreptimeDbStandaloneBuilder {
|
||||
store_types,
|
||||
&self.instance_name,
|
||||
self.datanode_wal_config.clone(),
|
||||
Default::default(),
|
||||
);
|
||||
|
||||
let kv_backend_config = KvBackendConfig::default();
|
||||
|
||||
@@ -32,6 +32,7 @@ use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{DatanodeOptions, StorageConfig};
|
||||
use frontend::instance::Instance;
|
||||
use frontend::service_config::{MysqlOptions, PostgresOptions};
|
||||
use mito2::gc::GcConfig;
|
||||
use object_store::config::{
|
||||
AzblobConfig, FileConfig, GcsConfig, ObjectStoreConfig, OssConfig, S3Config,
|
||||
};
|
||||
@@ -145,6 +146,7 @@ fn s3_test_config() -> S3Config {
|
||||
secret_access_key: env::var("GT_S3_ACCESS_KEY").unwrap().into(),
|
||||
bucket: env::var("GT_S3_BUCKET").unwrap(),
|
||||
region: Some(env::var("GT_S3_REGION").unwrap()),
|
||||
endpoint: env::var("GT_S3_ENDPOINT_URL").ok(),
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
@@ -163,7 +165,7 @@ pub fn get_test_store_config(store_type: &StorageType) -> (ObjectStoreConfig, Te
|
||||
scope: env::var("GT_GCS_SCOPE").unwrap(),
|
||||
credential_path: env::var("GT_GCS_CREDENTIAL_PATH").unwrap().into(),
|
||||
credential: env::var("GT_GCS_CREDENTIAL").unwrap().into(),
|
||||
endpoint: env::var("GT_GCS_ENDPOINT").unwrap(),
|
||||
endpoint: env::var("GT_GCS_ENDPOINT").unwrap_or_default(),
|
||||
},
|
||||
..Default::default()
|
||||
};
|
||||
@@ -297,6 +299,7 @@ pub fn create_tmp_dir_and_datanode_opts(
|
||||
store_provider_types: Vec<StorageType>,
|
||||
name: &str,
|
||||
wal_config: DatanodeWalConfig,
|
||||
gc_config: GcConfig,
|
||||
) -> (DatanodeOptions, TestGuard) {
|
||||
let home_tmp_dir = create_temp_dir(&format!("gt_data_{name}"));
|
||||
let home_dir = home_tmp_dir.path().to_str().unwrap().to_string();
|
||||
@@ -314,7 +317,13 @@ pub fn create_tmp_dir_and_datanode_opts(
|
||||
store_providers.push(store);
|
||||
storage_guards.push(StorageGuard(data_tmp_dir))
|
||||
}
|
||||
let opts = create_datanode_opts(default_store, store_providers, home_dir, wal_config);
|
||||
let opts = create_datanode_opts(
|
||||
default_store,
|
||||
store_providers,
|
||||
home_dir,
|
||||
wal_config,
|
||||
gc_config,
|
||||
);
|
||||
|
||||
(
|
||||
opts,
|
||||
@@ -330,7 +339,18 @@ pub(crate) fn create_datanode_opts(
|
||||
providers: Vec<ObjectStoreConfig>,
|
||||
home_dir: String,
|
||||
wal_config: DatanodeWalConfig,
|
||||
gc_config: GcConfig,
|
||||
) -> DatanodeOptions {
|
||||
let region_engine = DatanodeOptions::default()
|
||||
.region_engine
|
||||
.into_iter()
|
||||
.map(|mut v| {
|
||||
if let datanode::config::RegionEngineConfig::Mito(mito_config) = &mut v {
|
||||
mito_config.gc = gc_config.clone();
|
||||
}
|
||||
v
|
||||
})
|
||||
.collect();
|
||||
DatanodeOptions {
|
||||
node_id: Some(0),
|
||||
require_lease_before_startup: true,
|
||||
@@ -343,6 +363,7 @@ pub(crate) fn create_datanode_opts(
|
||||
.with_bind_addr(PEER_PLACEHOLDER_ADDR)
|
||||
.with_server_addr(PEER_PLACEHOLDER_ADDR),
|
||||
wal: wal_config,
|
||||
region_engine,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod gc;
|
||||
mod instance_kafka_wal_test;
|
||||
mod instance_noop_wal_test;
|
||||
mod instance_test;
|
||||
|
||||
262
tests-integration/src/tests/gc.rs
Normal file
@@ -0,0 +1,262 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::time::Duration;
|
||||
|
||||
use common_meta::key::TableMetadataManagerRef;
|
||||
use common_procedure::ProcedureWithId;
|
||||
use common_telemetry::info;
|
||||
use common_test_util::recordbatch::check_output_stream;
|
||||
use futures::TryStreamExt as _;
|
||||
use itertools::Itertools;
|
||||
use meta_srv::gc::{BatchGcProcedure, GcSchedulerOptions, Region2Peers};
|
||||
use mito2::gc::GcConfig;
|
||||
use store_api::storage::RegionId;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::cluster::GreptimeDbClusterBuilder;
|
||||
use crate::test_util::{StorageType, TempDirGuard, get_test_store_config};
|
||||
use crate::tests::test_util::{MockInstanceBuilder, TestContext, execute_sql, wait_procedure};
|
||||
|
||||
/// Helper function to get table route information for GC procedure
|
||||
async fn get_table_route(
|
||||
table_metadata_manager: &TableMetadataManagerRef,
|
||||
table_id: TableId,
|
||||
) -> (Region2Peers, Vec<RegionId>) {
|
||||
// Get physical table route
|
||||
let (_, physical_table_route) = table_metadata_manager
|
||||
.table_route_manager()
|
||||
.get_physical_table_route(table_id)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut region_routes = Region2Peers::new();
|
||||
let mut regions = Vec::new();
|
||||
|
||||
// Convert region routes to Region2Peers format
|
||||
for region_route in physical_table_route.region_routes {
|
||||
let region_id = region_route.region.id;
|
||||
let leader_peer = region_route.leader_peer.clone().unwrap();
|
||||
let follower_peers = region_route.follower_peers.clone();
|
||||
|
||||
region_routes.insert(region_id, (leader_peer, follower_peers));
|
||||
regions.push(region_id);
|
||||
}
|
||||
|
||||
(region_routes, regions)
|
||||
}
|
||||
|
||||
/// Helper function to list all SST files
|
||||
async fn list_sst_files(test_context: &TestContext) -> HashSet<String> {
|
||||
let mut sst_files = HashSet::new();
|
||||
|
||||
for datanode in test_context.datanodes().values() {
|
||||
let region_server = datanode.region_server();
|
||||
let mito = region_server.mito_engine().unwrap();
|
||||
let all_files = mito
|
||||
.all_ssts_from_storage()
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|e| e.file_path)
|
||||
.collect_vec();
|
||||
sst_files.extend(all_files);
|
||||
}
|
||||
|
||||
sst_files
|
||||
}
|
||||
|
||||
async fn distributed_with_gc(store_type: &StorageType) -> (TestContext, TempDirGuard) {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let test_name = uuid::Uuid::new_v4().to_string();
|
||||
let (store_config, guard) = get_test_store_config(store_type);
|
||||
|
||||
let builder = GreptimeDbClusterBuilder::new(&test_name)
|
||||
.await
|
||||
.with_metasrv_gc_config(GcSchedulerOptions {
|
||||
enable: true,
|
||||
..Default::default()
|
||||
})
|
||||
.with_datanode_gc_config(GcConfig {
|
||||
enable: true,
|
||||
// set lingering time to zero for test speedup
|
||||
lingering_time: Some(Duration::ZERO),
|
||||
..Default::default()
|
||||
})
|
||||
.with_store_config(store_config);
|
||||
(
|
||||
TestContext::new(MockInstanceBuilder::Distributed(builder)).await,
|
||||
guard,
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_gc_basic_different_store() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let store_type = StorageType::build_storage_types_based_on_env();
|
||||
for store in store_type {
|
||||
if store == StorageType::File {
|
||||
continue; // no point in testing gc on fs storage
|
||||
}
|
||||
info!("Running GC test with storage type: {}", store);
|
||||
test_gc_basic(&store).await;
|
||||
}
|
||||
}
|
||||
|
||||
async fn test_gc_basic(store_type: &StorageType) {
|
||||
let (test_context, _guard) = distributed_with_gc(store_type).await;
|
||||
let instance = test_context.frontend();
|
||||
let metasrv = test_context.metasrv();
|
||||
|
||||
// Step 1: Create table with append_mode to easily generate multiple files
|
||||
let create_table_sql = r#"
|
||||
CREATE TABLE test_gc_table (
|
||||
ts TIMESTAMP TIME INDEX,
|
||||
val DOUBLE,
|
||||
host STRING
|
||||
) WITH (append_mode = 'true')
|
||||
"#;
|
||||
execute_sql(&instance, create_table_sql).await;
|
||||
|
||||
// Step 2: Generate SST files by inserting data and flushing multiple times
|
||||
for i in 0..4 {
|
||||
let insert_sql = format!(
|
||||
r#"
|
||||
INSERT INTO test_gc_table (ts, val, host) VALUES
|
||||
('2023-01-0{} 10:00:00', {}, 'host{}'),
|
||||
('2023-01-0{} 11:00:00', {}, 'host{}'),
|
||||
('2023-01-0{} 12:00:00', {}, 'host{}')
|
||||
"#,
|
||||
i + 1,
|
||||
10.0 + i as f64,
|
||||
i,
|
||||
i + 1,
|
||||
20.0 + i as f64,
|
||||
i,
|
||||
i + 1,
|
||||
30.0 + i as f64,
|
||||
i
|
||||
);
|
||||
execute_sql(&instance, &insert_sql).await;
|
||||
|
||||
// Flush the table to create SST files
|
||||
let flush_sql = "ADMIN FLUSH_TABLE('test_gc_table')";
|
||||
execute_sql(&instance, flush_sql).await;
|
||||
}
|
||||
|
||||
// Step 3: Get table information
|
||||
let table = instance
|
||||
.catalog_manager()
|
||||
.table("greptime", "public", "test_gc_table", None)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let table_id = table.table_info().table_id();
|
||||
|
||||
// List SST files before compaction (for verification)
|
||||
let sst_files_before_compaction = list_sst_files(&test_context).await;
|
||||
info!(
|
||||
"SST files before compaction: {:?}",
|
||||
sst_files_before_compaction
|
||||
);
|
||||
assert_eq!(sst_files_before_compaction.len(), 4); // 4 files from 4 flushes
|
||||
|
||||
// Step 4: Trigger compaction to create garbage SST files
|
||||
let compact_sql = "ADMIN COMPACT_TABLE('test_gc_table')";
|
||||
execute_sql(&instance, compact_sql).await;
|
||||
|
||||
// Wait for compaction to complete
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
|
||||
// List SST files after compaction (should have both old and new files)
|
||||
let sst_files_after_compaction = list_sst_files(&test_context).await;
|
||||
info!(
|
||||
"SST files after compaction: {:?}",
|
||||
sst_files_after_compaction
|
||||
);
|
||||
assert_eq!(sst_files_after_compaction.len(), 5); // 4 old + 1 new
|
||||
|
||||
// Step 5: Get table route information for GC procedure
|
||||
let (region_routes, regions) =
|
||||
get_table_route(metasrv.table_metadata_manager(), table_id).await;
|
||||
|
||||
// Step 6: Create and execute BatchGcProcedure
|
||||
let procedure = BatchGcProcedure::new(
|
||||
metasrv.mailbox().clone(),
|
||||
metasrv.options().grpc.server_addr.clone(),
|
||||
regions.clone(),
|
||||
false, // full_file_listing
|
||||
region_routes,
|
||||
HashMap::new(), // related_regions (empty for this simple test)
|
||||
Duration::from_secs(10), // timeout
|
||||
);
|
||||
|
||||
// Submit the procedure to the procedure manager
|
||||
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
|
||||
let procedure_id = procedure_with_id.id;
|
||||
|
||||
let _watcher = metasrv
|
||||
.procedure_manager()
|
||||
.submit(procedure_with_id)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Wait for the procedure to complete
|
||||
wait_procedure(metasrv.procedure_manager(), procedure_id).await;
|
||||
|
||||
// Step 7: Verify GC results
|
||||
let sst_files_after_gc = list_sst_files(&test_context).await;
|
||||
info!("SST files after GC: {:?}", sst_files_after_gc);
|
||||
assert_eq!(sst_files_after_gc.len(), 1); // Only the compacted file should remain after gc
|
||||
|
||||
// Verify that data is still accessible
|
||||
let count_sql = "SELECT COUNT(*) FROM test_gc_table";
|
||||
let count_output = execute_sql(&instance, count_sql).await;
|
||||
let expected = r#"
|
||||
+----------+
|
||||
| count(*) |
|
||||
+----------+
|
||||
| 12 |
|
||||
+----------+"#
|
||||
.trim();
|
||||
check_output_stream(count_output.data, expected).await;
|
||||
|
||||
let select_sql = "SELECT * FROM test_gc_table ORDER BY ts";
|
||||
let select_output = execute_sql(&instance, select_sql).await;
|
||||
let expected = r#"
|
||||
+---------------------+------+-------+
|
||||
| ts | val | host |
|
||||
+---------------------+------+-------+
|
||||
| 2023-01-01T10:00:00 | 10.0 | host0 |
|
||||
| 2023-01-01T11:00:00 | 20.0 | host0 |
|
||||
| 2023-01-01T12:00:00 | 30.0 | host0 |
|
||||
| 2023-01-02T10:00:00 | 11.0 | host1 |
|
||||
| 2023-01-02T11:00:00 | 21.0 | host1 |
|
||||
| 2023-01-02T12:00:00 | 31.0 | host1 |
|
||||
| 2023-01-03T10:00:00 | 12.0 | host2 |
|
||||
| 2023-01-03T11:00:00 | 22.0 | host2 |
|
||||
| 2023-01-03T12:00:00 | 32.0 | host2 |
|
||||
| 2023-01-04T10:00:00 | 13.0 | host3 |
|
||||
| 2023-01-04T11:00:00 | 23.0 | host3 |
|
||||
| 2023-01-04T12:00:00 | 33.0 | host3 |
|
||||
+---------------------+------+-------+"#
|
||||
.trim();
|
||||
check_output_stream(select_output.data, expected).await;
|
||||
|
||||
// TODO: Add more specific assertions once we have proper file system access
|
||||
// For now, the test passes if the procedure executes without errors
|
||||
info!("GC test completed successfully");
|
||||
}
|
||||
@@ -19,7 +19,7 @@ use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig};
|
||||
|
||||
use crate::cluster::GreptimeDbClusterBuilder;
|
||||
use crate::tests::test_util::{
|
||||
MockInstance, MockInstanceBuilder, RebuildableMockInstance, TestContext, execute_sql,
|
||||
MockInstanceBuilder, RebuildableMockInstance, TestContext, execute_sql,
|
||||
};
|
||||
|
||||
pub(crate) async fn distributed_with_noop_wal() -> TestContext {
|
||||
|
||||
@@ -478,11 +478,11 @@ async fn test_execute_show_databases_tables(instance: Arc<dyn MockInstance>) {
|
||||
check_unordered_output_stream(output, expected).await;
|
||||
|
||||
let expected = "\
|
||||
+---------+
|
||||
| Tables |
|
||||
+---------+
|
||||
| numbers |
|
||||
+---------+\
|
||||
+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| numbers |
|
||||
+------------------+\
|
||||
";
|
||||
let output = execute_sql(&instance, "show tables").await;
|
||||
check_unordered_output_stream(output, expected).await;
|
||||
@@ -494,23 +494,23 @@ async fn test_execute_show_databases_tables(instance: Arc<dyn MockInstance>) {
|
||||
|
||||
let output = execute_sql(&instance, "show tables").await;
|
||||
let expected = "\
|
||||
+---------+
|
||||
| Tables |
|
||||
+---------+
|
||||
| demo |
|
||||
| numbers |
|
||||
+---------+\
|
||||
+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| demo |
|
||||
| numbers |
|
||||
+------------------+\
|
||||
";
|
||||
check_unordered_output_stream(output, expected).await;
|
||||
|
||||
let output = execute_sql(&instance, "SHOW FULL TABLES WHERE Table_type != 'VIEW'").await;
|
||||
let expected = "\
|
||||
+---------+-----------------+
|
||||
| Tables | Table_type |
|
||||
+---------+-----------------+
|
||||
| demo | BASE TABLE |
|
||||
| numbers | LOCAL TEMPORARY |
|
||||
+---------+-----------------+\
|
||||
+------------------+-----------------+
|
||||
| Tables_in_public | Table_type |
|
||||
+------------------+-----------------+
|
||||
| demo | BASE TABLE |
|
||||
| numbers | LOCAL TEMPORARY |
|
||||
+------------------+-----------------+\
|
||||
";
|
||||
check_unordered_output_stream(output, expected).await;
|
||||
|
||||
@@ -520,22 +520,22 @@ async fn test_execute_show_databases_tables(instance: Arc<dyn MockInstance>) {
|
||||
)
|
||||
.await;
|
||||
let expected = "\
|
||||
+--------+------------+
|
||||
| Tables | Table_type |
|
||||
+--------+------------+
|
||||
| demo | BASE TABLE |
|
||||
+--------+------------+\
|
||||
+------------------+------------+
|
||||
| Tables_in_public | Table_type |
|
||||
+------------------+------------+
|
||||
| demo | BASE TABLE |
|
||||
+------------------+------------+\
|
||||
";
|
||||
check_unordered_output_stream(output, expected).await;
|
||||
|
||||
// show tables like [string]
|
||||
let output = execute_sql(&instance, "show tables like 'de%'").await;
|
||||
let expected = "\
|
||||
+--------+
|
||||
| Tables |
|
||||
+--------+
|
||||
| demo |
|
||||
+--------+\
|
||||
+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| demo |
|
||||
+------------------+\
|
||||
";
|
||||
check_unordered_output_stream(output, expected).await;
|
||||
}
|
||||
@@ -1252,11 +1252,11 @@ async fn test_rename_table(instance: Arc<dyn MockInstance>) {
|
||||
.await
|
||||
.data;
|
||||
let expect = "\
|
||||
+------------+
|
||||
| Tables |
|
||||
+------------+
|
||||
| test_table |
|
||||
+------------+";
|
||||
+--------------+
|
||||
| Tables_in_db |
|
||||
+--------------+
|
||||
| test_table |
|
||||
+--------------+";
|
||||
check_output_stream(output, expect).await;
|
||||
|
||||
let output = execute_sql_with(
|
||||
@@ -1323,12 +1323,12 @@ async fn test_create_table_after_rename_table(instance: Arc<dyn MockInstance>) {
|
||||
assert!(matches!(output, OutputData::AffectedRows(0)));
|
||||
|
||||
let expect = "\
|
||||
+------------+
|
||||
| Tables |
|
||||
+------------+
|
||||
| demo |
|
||||
| test_table |
|
||||
+------------+";
|
||||
+--------------+
|
||||
| Tables_in_db |
|
||||
+--------------+
|
||||
| demo |
|
||||
| test_table |
|
||||
+--------------+";
|
||||
let output = execute_sql_with(&instance, "show tables", query_ctx)
|
||||
.await
|
||||
.data;
|
||||
@@ -1516,11 +1516,11 @@ async fn test_use_database(instance: Arc<dyn MockInstance>) {
|
||||
.await
|
||||
.data;
|
||||
let expected = "\
|
||||
+--------+
|
||||
| Tables |
|
||||
+--------+
|
||||
| tb1 |
|
||||
+--------+";
|
||||
+---------------+
|
||||
| Tables_in_db1 |
|
||||
+---------------+
|
||||
| tb1 |
|
||||
+---------------+";
|
||||
check_output_stream(output, expected).await;
|
||||
|
||||
let output = execute_sql_with(
|
||||
|
||||
@@ -24,8 +24,8 @@ use table::table_reference::TableReference;
|
||||
|
||||
use crate::cluster::GreptimeDbClusterBuilder;
|
||||
use crate::tests::test_util::{
|
||||
MockInstance, MockInstanceBuilder, RebuildableMockInstance, TestContext, dump_kvbackend,
|
||||
execute_sql, restore_kvbackend, try_execute_sql, wait_procedure,
|
||||
MockInstanceBuilder, RebuildableMockInstance, TestContext, dump_kvbackend, execute_sql,
|
||||
restore_kvbackend, try_execute_sql, wait_procedure,
|
||||
};
|
||||
|
||||
const CREATE_MONITOR_TABLE_SQL: &str = r#"
|
||||
@@ -409,11 +409,11 @@ async fn test_recover_metadata_failed() {
|
||||
|
||||
// Only grpc_latencies table is visible.
|
||||
let output = execute_sql(&test_context.frontend(), "show tables;").await;
|
||||
let expected = r#"+---------+
|
||||
| Tables |
|
||||
+---------+
|
||||
| numbers |
|
||||
+---------+"#;
|
||||
let expected = r#"+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| numbers |
|
||||
+------------------+"#;
|
||||
check_output_stream(output.data, expected).await;
|
||||
|
||||
// Expect table creation to fail because the region directory already exists.
|
||||
@@ -474,12 +474,12 @@ async fn test_dropped_table() {
|
||||
test_context.rebuild().await;
|
||||
|
||||
let output = execute_sql(&test_context.frontend(), "show tables;").await;
|
||||
let expected = r#"+----------------+
|
||||
| Tables |
|
||||
+----------------+
|
||||
| grpc_latencies |
|
||||
| numbers |
|
||||
+----------------+"#;
|
||||
let expected = r#"+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| grpc_latencies |
|
||||
| numbers |
|
||||
+------------------+"#;
|
||||
check_output_stream(output.data, expected).await;
|
||||
|
||||
// We can't query the table because the table is dropped.
|
||||
@@ -531,12 +531,12 @@ async fn test_renamed_table() {
|
||||
check_output_stream(output.data, expected).await;
|
||||
|
||||
let output = execute_sql(&test_context.frontend(), "show tables;").await;
|
||||
let expected = r#"+----------------+
|
||||
| Tables |
|
||||
+----------------+
|
||||
| grpc_latencies |
|
||||
| numbers |
|
||||
+----------------+"#;
|
||||
let expected = r#"+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| grpc_latencies |
|
||||
| numbers |
|
||||
+------------------+"#;
|
||||
check_output_stream(output.data, expected).await;
|
||||
}
|
||||
|
||||
|
||||
@@ -12,11 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use client::OutputData;
|
||||
use common_meta::DatanodeId;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream};
|
||||
use common_meta::rpc::KeyValue;
|
||||
@@ -30,6 +32,7 @@ use common_test_util::find_workspace_path;
|
||||
use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig};
|
||||
use common_wal::config::kafka::{DatanodeKafkaConfig, MetasrvKafkaConfig};
|
||||
use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig};
|
||||
use datanode::datanode::Datanode;
|
||||
use frontend::error::Result;
|
||||
use frontend::instance::Instance;
|
||||
use futures::TryStreamExt;
|
||||
@@ -95,6 +98,13 @@ impl MockInstanceImpl {
|
||||
MockInstanceImpl::Distributed(instance) => &instance.metasrv,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn datanodes(&self) -> &HashMap<DatanodeId, Datanode> {
|
||||
match self {
|
||||
MockInstanceImpl::Standalone(_) => unreachable!(),
|
||||
MockInstanceImpl::Distributed(instance) => &instance.datanode_instances,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MockInstance for MockInstanceImpl {
|
||||
@@ -185,6 +195,14 @@ impl TestContext {
|
||||
pub(crate) fn metasrv(&self) -> &Arc<Metasrv> {
|
||||
self.instance.as_ref().unwrap().metasrv()
|
||||
}
|
||||
|
||||
pub(crate) fn frontend(&self) -> Arc<Instance> {
|
||||
self.instance.as_ref().unwrap().frontend()
|
||||
}
|
||||
|
||||
pub(crate) fn datanodes(&self) -> &HashMap<DatanodeId, Datanode> {
|
||||
self.instance.as_ref().unwrap().datanodes()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
|
||||
@@ -113,7 +113,7 @@ Error: 3001(EngineExecuteQuery), Invalid query: TQL query only supports one f64
|
||||
SHOW TABLES;
|
||||
|
||||
+------------------------+
|
||||
| Tables |
|
||||
| Tables_in_public |
|
||||
+------------------------+
|
||||
| http_requests_two_vals |
|
||||
| numbers |
|
||||
|
||||
@@ -4,12 +4,12 @@ Affected Rows: 0
|
||||
|
||||
SHOW TABLES;
|
||||
|
||||
+---------+
|
||||
| Tables |
|
||||
+---------+
|
||||
| numbers |
|
||||
| phy |
|
||||
+---------+
|
||||
+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| numbers |
|
||||
| phy |
|
||||
+------------------+
|
||||
|
||||
DESC TABLE phy;
|
||||
|
||||
|
||||
@@ -45,19 +45,19 @@ Affected Rows: 0
|
||||
|
||||
SHOW TABLES FROM test_public_schema;
|
||||
|
||||
+--------+
|
||||
| Tables |
|
||||
+--------+
|
||||
| hello |
|
||||
+--------+
|
||||
+------------------------------+
|
||||
| Tables_in_test_public_schema |
|
||||
+------------------------------+
|
||||
| hello |
|
||||
+------------------------------+
|
||||
|
||||
SHOW TABLES FROM public;
|
||||
|
||||
+---------+
|
||||
| Tables |
|
||||
+---------+
|
||||
| numbers |
|
||||
+---------+
|
||||
+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| numbers |
|
||||
+------------------+
|
||||
|
||||
INSERT INTO hello VALUES (2), (3), (4);
|
||||
|
||||
@@ -75,19 +75,19 @@ SELECT * FROM hello;
|
||||
|
||||
SHOW TABLES;
|
||||
|
||||
+--------+
|
||||
| Tables |
|
||||
+--------+
|
||||
| hello |
|
||||
+--------+
|
||||
+------------------------------+
|
||||
| Tables_in_test_public_schema |
|
||||
+------------------------------+
|
||||
| hello |
|
||||
+------------------------------+
|
||||
|
||||
SHOW FULL TABLES WHERE Table_type != 'VIEW';
|
||||
|
||||
+--------+------------+
|
||||
| Tables | Table_type |
|
||||
+--------+------------+
|
||||
| hello | BASE TABLE |
|
||||
+--------+------------+
|
||||
+------------------------------+------------+
|
||||
| Tables_in_test_public_schema | Table_type |
|
||||
+------------------------------+------------+
|
||||
| hello | BASE TABLE |
|
||||
+------------------------------+------------+
|
||||
|
||||
DROP TABLE hello;
|
||||
|
||||
@@ -104,19 +104,19 @@ SHOW TABLES FROM test_public_schema;
|
||||
|
||||
SHOW TABLES FROM public;
|
||||
|
||||
+---------+
|
||||
| Tables |
|
||||
+---------+
|
||||
| numbers |
|
||||
+---------+
|
||||
+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| numbers |
|
||||
+------------------+
|
||||
|
||||
SHOW TABLES FROM public WHERE Tables = 'numbers';
|
||||
|
||||
+---------+
|
||||
| Tables |
|
||||
+---------+
|
||||
| numbers |
|
||||
+---------+
|
||||
+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| numbers |
|
||||
+------------------+
|
||||
|
||||
DROP SCHEMA test_public_schema;
|
||||
|
||||
|
||||
@@ -4,12 +4,12 @@ Affected Rows: 0
|
||||
|
||||
SHOW TABLES;
|
||||
|
||||
+---------+
|
||||
| Tables |
|
||||
+---------+
|
||||
| numbers |
|
||||
| phy |
|
||||
+---------+
|
||||
+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| numbers |
|
||||
| phy |
|
||||
+------------------+
|
||||
|
||||
DESC TABLE phy;
|
||||
|
||||
|
||||
@@ -40,12 +40,12 @@ Error: 4001(TableNotFound), Table not found: greptime.public.bar
|
||||
|
||||
SHOW TABLES;
|
||||
|
||||
+---------+
|
||||
| Tables |
|
||||
+---------+
|
||||
| foo |
|
||||
| numbers |
|
||||
+---------+
|
||||
+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| foo |
|
||||
| numbers |
|
||||
+------------------+
|
||||
|
||||
DROP TABLE IF EXISTS foo, bar;
|
||||
|
||||
|
||||
@@ -991,11 +991,11 @@ ADMIN FLUSH_FLOW('temp_monitoring');
|
||||
-- This table should not exist yet
|
||||
SHOW TABLES LIKE 'temp_alerts';
|
||||
|
||||
+-------------+
|
||||
| Tables |
|
||||
+-------------+
|
||||
| temp_alerts |
|
||||
+-------------+
|
||||
+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| temp_alerts |
|
||||
+------------------+
|
||||
|
||||
INSERT INTO
|
||||
temp_sensor_data
|
||||
@@ -1015,11 +1015,11 @@ ADMIN FLUSH_FLOW('temp_monitoring');
|
||||
|
||||
SHOW TABLES LIKE 'temp_alerts';
|
||||
|
||||
+-------------+
|
||||
| Tables |
|
||||
+-------------+
|
||||
| temp_alerts |
|
||||
+-------------+
|
||||
+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| temp_alerts |
|
||||
+------------------+
|
||||
|
||||
SELECT
|
||||
sensor_id,
|
||||
|
||||
@@ -96,6 +96,133 @@ SELECT LAST_VALUE('a');
|
||||
| a |
|
||||
+-----------------------+
|
||||
|
||||
-- MySQL-compatible IF function tests
|
||||
SELECT IF(true, 'yes', 'no');
|
||||
|
||||
+------------------------------------------+
|
||||
| if(Boolean(true),Utf8("yes"),Utf8("no")) |
|
||||
+------------------------------------------+
|
||||
| yes |
|
||||
+------------------------------------------+
|
||||
|
||||
SELECT IF(false, 'yes', 'no');
|
||||
|
||||
+-------------------------------------------+
|
||||
| if(Boolean(false),Utf8("yes"),Utf8("no")) |
|
||||
+-------------------------------------------+
|
||||
| no |
|
||||
+-------------------------------------------+
|
||||
|
||||
SELECT IF(NULL, 'yes', 'no');
|
||||
|
||||
+---------------------------------+
|
||||
| if(NULL,Utf8("yes"),Utf8("no")) |
|
||||
+---------------------------------+
|
||||
| no |
|
||||
+---------------------------------+
|
||||
|
||||
SELECT IF(1, 'yes', 'no');
|
||||
|
||||
+-------------------------------------+
|
||||
| if(Int64(1),Utf8("yes"),Utf8("no")) |
|
||||
+-------------------------------------+
|
||||
| yes |
|
||||
+-------------------------------------+
|
||||
|
||||
SELECT IF(0, 'yes', 'no');
|
||||
|
||||
+-------------------------------------+
|
||||
| if(Int64(0),Utf8("yes"),Utf8("no")) |
|
||||
+-------------------------------------+
|
||||
| no |
|
||||
+-------------------------------------+
|
||||
|
||||
SELECT IF(-1, 'yes', 'no');
|
||||
|
||||
+--------------------------------------+
|
||||
| if(Int64(-1),Utf8("yes"),Utf8("no")) |
|
||||
+--------------------------------------+
|
||||
| yes |
|
||||
+--------------------------------------+
|
||||
|
||||
SELECT IF(1.5, 'yes', 'no');
|
||||
|
||||
+-----------------------------------------+
|
||||
| if(Float64(1.5),Utf8("yes"),Utf8("no")) |
|
||||
+-----------------------------------------+
|
||||
| yes |
|
||||
+-----------------------------------------+
|
||||
|
||||
SELECT IF(0.0, 'yes', 'no');
|
||||
|
||||
+---------------------------------------+
|
||||
| if(Float64(0),Utf8("yes"),Utf8("no")) |
|
||||
+---------------------------------------+
|
||||
| no |
|
||||
+---------------------------------------+
|
||||
|
||||
-- Test with table column
|
||||
SELECT IF(a > 1, 'greater', 'not greater') FROM t;
|
||||
|
||||
+--------------------------------------------------------+
|
||||
| if(t.a > Int64(1),Utf8("greater"),Utf8("not greater")) |
|
||||
+--------------------------------------------------------+
|
||||
| not greater |
|
||||
| not greater |
|
||||
| greater |
|
||||
+--------------------------------------------------------+
|
||||
|
||||
-- Test numeric return types
|
||||
SELECT IF(true, 100, 200);
|
||||
|
||||
+-----------------------------------------+
|
||||
| if(Boolean(true),Int64(100),Int64(200)) |
|
||||
+-----------------------------------------+
|
||||
| 100 |
|
||||
+-----------------------------------------+
|
||||
|
||||
SELECT IF(false, 100, 200);
|
||||
|
||||
+------------------------------------------+
|
||||
| if(Boolean(false),Int64(100),Int64(200)) |
|
||||
+------------------------------------------+
|
||||
| 200 |
|
||||
+------------------------------------------+
|
||||
|
||||
-- Test with IFNULL (should already work via DataFusion)
|
||||
SELECT IFNULL(NULL, 'default');
|
||||
|
||||
+------------------------------+
|
||||
| ifnull(NULL,Utf8("default")) |
|
||||
+------------------------------+
|
||||
| default |
|
||||
+------------------------------+
|
||||
|
||||
SELECT IFNULL('value', 'default');
|
||||
|
||||
+---------------------------------------+
|
||||
| ifnull(Utf8("value"),Utf8("default")) |
|
||||
+---------------------------------------+
|
||||
| value |
|
||||
+---------------------------------------+
|
||||
|
||||
-- Test COALESCE (should already work via DataFusion)
|
||||
SELECT COALESCE(NULL, NULL, 'third');
|
||||
|
||||
+-----------------------------------+
|
||||
| coalesce(NULL,NULL,Utf8("third")) |
|
||||
+-----------------------------------+
|
||||
| third |
|
||||
+-----------------------------------+
|
||||
|
||||
SELECT COALESCE('first', 'second');
|
||||
|
||||
+----------------------------------------+
|
||||
| coalesce(Utf8("first"),Utf8("second")) |
|
||||
+----------------------------------------+
|
||||
| first |
|
||||
+----------------------------------------+
|
||||
|
||||
DROP TABLE t;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
@@ -24,4 +24,39 @@ SELECT LAST_VALUE(1);
|
||||
|
||||
SELECT LAST_VALUE('a');
|
||||
|
||||
-- MySQL-compatible IF function tests
|
||||
SELECT IF(true, 'yes', 'no');
|
||||
|
||||
SELECT IF(false, 'yes', 'no');
|
||||
|
||||
SELECT IF(NULL, 'yes', 'no');
|
||||
|
||||
SELECT IF(1, 'yes', 'no');
|
||||
|
||||
SELECT IF(0, 'yes', 'no');
|
||||
|
||||
SELECT IF(-1, 'yes', 'no');
|
||||
|
||||
SELECT IF(1.5, 'yes', 'no');
|
||||
|
||||
SELECT IF(0.0, 'yes', 'no');
|
||||
|
||||
-- Test with table column
|
||||
SELECT IF(a > 1, 'greater', 'not greater') FROM t;
|
||||
|
||||
-- Test numeric return types
|
||||
SELECT IF(true, 100, 200);
|
||||
|
||||
SELECT IF(false, 100, 200);
|
||||
|
||||
-- Test with IFNULL (should already work via DataFusion)
|
||||
SELECT IFNULL(NULL, 'default');
|
||||
|
||||
SELECT IFNULL('value', 'default');
|
||||
|
||||
-- Test COALESCE (should already work via DataFusion)
|
||||
SELECT COALESCE(NULL, NULL, 'third');
|
||||
|
||||
SELECT COALESCE('first', 'second');
|
||||
|
||||
DROP TABLE t;
|
||||
|
||||
@@ -27,3 +27,78 @@ SHOW DATABASES;
|
||||
| public |
|
||||
+--------------------+
|
||||
|
||||
-- ======================================================
|
||||
-- MySQL compatibility tests for JDBC connectors
|
||||
-- ======================================================
|
||||
-- Test MySQL IF() function (issue #7278 compatibility)
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SELECT IF(1, 'yes', 'no') as result;
|
||||
|
||||
+--------+
|
||||
| result |
|
||||
+--------+
|
||||
| yes |
|
||||
+--------+
|
||||
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SELECT IF(0, 'yes', 'no') as result;
|
||||
|
||||
+--------+
|
||||
| result |
|
||||
+--------+
|
||||
| no |
|
||||
+--------+
|
||||
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SELECT IF(NULL, 'yes', 'no') as result;
|
||||
|
||||
+--------+
|
||||
| result |
|
||||
+--------+
|
||||
| no |
|
||||
+--------+
|
||||
|
||||
-- Test IFNULL (should work via DataFusion)
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SELECT IFNULL(NULL, 'default') as result;
|
||||
|
||||
+---------+
|
||||
| result |
|
||||
+---------+
|
||||
| default |
|
||||
+---------+
|
||||
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SELECT IFNULL('value', 'default') as result;
|
||||
|
||||
+--------+
|
||||
| result |
|
||||
+--------+
|
||||
| value |
|
||||
+--------+
|
||||
|
||||
-- Test COALESCE
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SELECT COALESCE(NULL, NULL, 'third') as result;
|
||||
|
||||
+--------+
|
||||
| result |
|
||||
+--------+
|
||||
| third |
|
||||
+--------+
|
||||
|
||||
-- Verify SHOW TABLES column naming
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
USE public;
|
||||
|
||||
affected_rows: 0
|
||||
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SHOW TABLES;
|
||||
|
||||
+------------------+
|
||||
| Tables_in_public |
|
||||
+------------------+
|
||||
| numbers |
|
||||
+------------------+
|
||||
|
||||
|
||||
@@ -6,3 +6,35 @@ SELECT @@version_comment;
|
||||
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SHOW DATABASES;
|
||||
|
||||
-- ======================================================
|
||||
-- MySQL compatibility tests for JDBC connectors
|
||||
-- ======================================================
|
||||
|
||||
-- Test MySQL IF() function (issue #7278 compatibility)
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SELECT IF(1, 'yes', 'no') as result;
|
||||
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SELECT IF(0, 'yes', 'no') as result;
|
||||
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SELECT IF(NULL, 'yes', 'no') as result;
|
||||
|
||||
-- Test IFNULL (should work via DataFusion)
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SELECT IFNULL(NULL, 'default') as result;
|
||||
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SELECT IFNULL('value', 'default') as result;
|
||||
|
||||
-- Test COALESCE
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SELECT COALESCE(NULL, NULL, 'third') as result;
|
||||
|
||||
-- Verify SHOW TABLES column naming
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
USE public;
|
||||
|
||||
-- SQLNESS PROTOCOL MYSQL
|
||||
SHOW TABLES;
|
||||
|
||||
@@ -12,15 +12,15 @@ PARTITION ON COLUMNS (a) (
|
||||
Affected Rows: 0
|
||||
|
||||
-- SQLNESS REPLACE (\d{13}) ID
|
||||
SELECT table_catalog, table_schema, table_name, partition_name, partition_expression, greptime_partition_id from information_schema.partitions WHERE table_name = 'my_table' ORDER BY table_catalog, table_schema, table_name, partition_name;
|
||||
SELECT table_catalog, table_schema, table_name, partition_name, partition_expression, partition_description, greptime_partition_id from information_schema.partitions WHERE table_name = 'my_table' ORDER BY table_catalog, table_schema, table_name, partition_name;
|
||||
|
||||
+---------------+--------------+------------+----------------+------------------------+-----------------------+
|
||||
| table_catalog | table_schema | table_name | partition_name | partition_expression | greptime_partition_id |
|
||||
+---------------+--------------+------------+----------------+------------------------+-----------------------+
|
||||
| greptime | public | my_table | p0 | a < 1000 | ID |
|
||||
| greptime | public | my_table | p1 | a >= 1000 AND a < 2000 | ID |
|
||||
| greptime | public | my_table | p2 | a >= 2000 | ID |
|
||||
+---------------+--------------+------------+----------------+------------------------+-----------------------+
|
||||
+---------------+--------------+------------+----------------+----------------------+------------------------+-----------------------+
|
||||
| table_catalog | table_schema | table_name | partition_name | partition_expression | partition_description | greptime_partition_id |
|
||||
+---------------+--------------+------------+----------------+----------------------+------------------------+-----------------------+
|
||||
| greptime | public | my_table | p0 | a | a < 1000 | ID |
|
||||
| greptime | public | my_table | p1 | a | a >= 1000 AND a < 2000 | ID |
|
||||
| greptime | public | my_table | p2 | a | a >= 2000 | ID |
|
||||
+---------------+--------------+------------+----------------+----------------------+------------------------+-----------------------+
|
||||
|
||||
-- SQLNESS REPLACE (\d{13}) REGION_ID
|
||||
-- SQLNESS REPLACE (\d{1}) PEER_ID
|
||||
@@ -126,7 +126,7 @@ SELECT table_catalog, table_schema, table_name, partition_name, partition_expres
|
||||
+---------------+--------------+------------+----------------+----------------------+-----------------------+
|
||||
| table_catalog | table_schema | table_name | partition_name | partition_expression | greptime_partition_id |
|
||||
+---------------+--------------+------------+----------------+----------------------+-----------------------+
|
||||
| greptime | public | my_table | p0 | | ID |
|
||||
| greptime | public | my_table | p0 | a | ID |
|
||||
+---------------+--------------+------------+----------------+----------------------+-----------------------+
|
||||
|
||||
-- SQLNESS REPLACE (\d{13}) REGION_ID
|
||||
|
||||
@@ -10,7 +10,7 @@ PARTITION ON COLUMNS (a) (
|
||||
);
|
||||
|
||||
-- SQLNESS REPLACE (\d{13}) ID
|
||||
SELECT table_catalog, table_schema, table_name, partition_name, partition_expression, greptime_partition_id from information_schema.partitions WHERE table_name = 'my_table' ORDER BY table_catalog, table_schema, table_name, partition_name;
|
||||
SELECT table_catalog, table_schema, table_name, partition_name, partition_expression, partition_description, greptime_partition_id from information_schema.partitions WHERE table_name = 'my_table' ORDER BY table_catalog, table_schema, table_name, partition_name;
|
||||
|
||||
-- SQLNESS REPLACE (\d{13}) REGION_ID
|
||||
-- SQLNESS REPLACE (\d{1}) PEER_ID
|
||||
|
||||
@@ -25,7 +25,7 @@ Affected Rows: 0
|
||||
SHOW TABLES;
|
||||
|
||||
+---------------------------------------+
|
||||
| Tables |
|
||||
| Tables_in_information_schema |
|
||||
+---------------------------------------+
|
||||
| build_info |
|
||||
| character_sets |
|
||||
@@ -66,16 +66,16 @@ SHOW TABLES;
|
||||
|
||||
SHOW TABLES LIKE 'tables';
|
||||
|
||||
+--------+
|
||||
| Tables |
|
||||
+--------+
|
||||
| tables |
|
||||
+--------+
|
||||
+------------------------------+
|
||||
| Tables_in_information_schema |
|
||||
+------------------------------+
|
||||
| tables |
|
||||
+------------------------------+
|
||||
|
||||
SHOW FULL TABLES;
|
||||
|
||||
+---------------------------------------+-----------------+
|
||||
| Tables | Table_type |
|
||||
| Tables_in_information_schema | Table_type |
|
||||
+---------------------------------------+-----------------+
|
||||
| build_info | LOCAL TEMPORARY |
|
||||
| character_sets | LOCAL TEMPORARY |
|
||||
|
||||
@@ -959,3 +959,74 @@ use public;
|
||||
|
||||
|
||||
|
||||
-- PostgreSQL description functions - placeholder returning NULL for compatibility
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT obj_description((SELECT oid FROM pg_class LIMIT 1), 'pg_class') IS NULL AS is_null;
|
||||
|
||||
+---------+
|
||||
| is_null |
|
||||
+---------+
|
||||
| t |
|
||||
+---------+
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT obj_description((SELECT oid FROM pg_class LIMIT 1)) IS NULL AS is_null;
|
||||
|
||||
+---------+
|
||||
| is_null |
|
||||
+---------+
|
||||
| t |
|
||||
+---------+
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT col_description((SELECT oid FROM pg_class LIMIT 1), 1) IS NULL AS is_null;
|
||||
|
||||
+---------+
|
||||
| is_null |
|
||||
+---------+
|
||||
| t |
|
||||
+---------+
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT shobj_description(1, 'pg_database') IS NULL AS is_null;
|
||||
|
||||
+---------+
|
||||
| is_null |
|
||||
+---------+
|
||||
| t |
|
||||
+---------+
|
||||
|
||||
-- pg_my_temp_schema returns 0 (no temp schema support)
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT pg_my_temp_schema();
|
||||
|
||||
+---------------------+
|
||||
| pg_my_temp_schema() |
|
||||
+---------------------+
|
||||
| 0 |
|
||||
+---------------------+
|
||||
|
||||
-- Issue 7313
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
-- SQLNESS REPLACE (\d+\s*) OID
|
||||
SELECT
|
||||
oid
|
||||
,nspname
|
||||
,nspname = ANY (current_schemas(true)) AS is_on_search_path
|
||||
|
||||
,obj_description(oid, 'pg_namespace') AS comment
|
||||
|
||||
FROM pg_namespace; SELECT
|
||||
oid
|
||||
,nspname
|
||||
FROM pg_namespace
|
||||
WHERE oid = pg_my_temp_schema();
|
||||
|
||||
+-------+--------------------+-------------------+---------+
|
||||
| oid | nspname | is_on_search_path | comment |
|
||||
+-------+--------------------+-------------------+---------+
|
||||
| OID| greptime_private | t | |
|
||||
| OID| information_schema | t | |
|
||||
| OID| public | t | |
|
||||
+-------+--------------------+-------------------+---------+
|
||||
|
||||
|
||||
@@ -166,3 +166,38 @@ drop table my_db.foo;
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
use public;
|
||||
|
||||
-- PostgreSQL description functions - placeholder returning NULL for compatibility
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT obj_description((SELECT oid FROM pg_class LIMIT 1), 'pg_class') IS NULL AS is_null;
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT obj_description((SELECT oid FROM pg_class LIMIT 1)) IS NULL AS is_null;
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT col_description((SELECT oid FROM pg_class LIMIT 1), 1) IS NULL AS is_null;
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT shobj_description(1, 'pg_database') IS NULL AS is_null;
|
||||
|
||||
-- pg_my_temp_schema returns 0 (no temp schema support)
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT pg_my_temp_schema();
|
||||
|
||||
-- Issue 7313
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
-- SQLNESS REPLACE (\d+\s*) OID
|
||||
SELECT
|
||||
oid
|
||||
,nspname
|
||||
,nspname = ANY (current_schemas(true)) AS is_on_search_path
|
||||
|
||||
,obj_description(oid, 'pg_namespace') AS comment
|
||||
|
||||
FROM pg_namespace; SELECT
|
||||
oid
|
||||
,nspname
|
||||
FROM pg_namespace
|
||||
WHERE oid = pg_my_temp_schema();
|
||||
|
||||
|
||||
80
tests/cases/standalone/common/system/set_unsupported.result
Normal file
@@ -0,0 +1,80 @@
-- Test unsupported set variables for MySQL protocol
-- These should succeed with a warning instead of failing
-- Test setting an unsupported variable
-- SQLNESS PROTOCOL MYSQL
SET autocommit = 1;

affected_rows: 0

-- Test setting with @@ prefix (previously this would succeed)
-- SQLNESS PROTOCOL MYSQL
SET @@autocommit = 1;

affected_rows: 0

-- Test setting character_set_client (commonly used by MySQL clients)
-- SQLNESS PROTOCOL MYSQL
SET character_set_client = 'utf8mb4';

affected_rows: 0

-- Test setting character_set_results
-- SQLNESS PROTOCOL MYSQL
SET character_set_results = 'utf8mb4';

affected_rows: 0

-- Test setting sql_mode
-- SQLNESS PROTOCOL MYSQL
SET sql_mode = 'STRICT_TRANS_TABLES';

affected_rows: 0

-- Test multiple unsupported settings
-- SQLNESS PROTOCOL MYSQL
SET @@session.sql_mode = 'TRADITIONAL';

affected_rows: 0

-- Test NAMES (special MySQL syntax for character set)
-- SQLNESS PROTOCOL MYSQL
SET NAMES utf8mb4;

affected_rows: 0

-- Test collation_connection
-- SQLNESS PROTOCOL MYSQL
SET collation_connection = 'utf8mb4_unicode_ci';

affected_rows: 0

-- Test SHOW WARNINGS after setting unsupported variable
-- SQLNESS PROTOCOL MYSQL
SET some_unsupported_var = 123;

affected_rows: 0

-- SQLNESS PROTOCOL MYSQL
SHOW WARNINGS;

+---------+------+-----------------------------------------------+
| Level   | Code | Message                                       |
+---------+------+-----------------------------------------------+
| Warning | 1000 | Unsupported set variable SOME_UNSUPPORTED_VAR |
+---------+------+-----------------------------------------------+

-- Test that warning is cleared after next statement
-- SQLNESS PROTOCOL MYSQL
SELECT 1;

+----------+
| Int64(1) |
+----------+
| 1        |
+----------+

-- SQLNESS PROTOCOL MYSQL
SHOW WARNINGS;

affected_rows: 0
48
tests/cases/standalone/common/system/set_unsupported.sql
Normal file
@@ -0,0 +1,48 @@
-- Test unsupported set variables for MySQL protocol
-- These should succeed with a warning instead of failing

-- Test setting an unsupported variable
-- SQLNESS PROTOCOL MYSQL
SET autocommit = 1;

-- Test setting with @@ prefix (previously this would succeed)
-- SQLNESS PROTOCOL MYSQL
SET @@autocommit = 1;

-- Test setting character_set_client (commonly used by MySQL clients)
-- SQLNESS PROTOCOL MYSQL
SET character_set_client = 'utf8mb4';

-- Test setting character_set_results
-- SQLNESS PROTOCOL MYSQL
SET character_set_results = 'utf8mb4';

-- Test setting sql_mode
-- SQLNESS PROTOCOL MYSQL
SET sql_mode = 'STRICT_TRANS_TABLES';

-- Test multiple unsupported settings
-- SQLNESS PROTOCOL MYSQL
SET @@session.sql_mode = 'TRADITIONAL';

-- Test NAMES (special MySQL syntax for character set)
-- SQLNESS PROTOCOL MYSQL
SET NAMES utf8mb4;

-- Test collation_connection
-- SQLNESS PROTOCOL MYSQL
SET collation_connection = 'utf8mb4_unicode_ci';

-- Test SHOW WARNINGS after setting unsupported variable
-- SQLNESS PROTOCOL MYSQL
SET some_unsupported_var = 123;

-- SQLNESS PROTOCOL MYSQL
SHOW WARNINGS;

-- Test that warning is cleared after next statement
-- SQLNESS PROTOCOL MYSQL
SELECT 1;

-- SQLNESS PROTOCOL MYSQL
SHOW WARNINGS;
@@ -0,0 +1,204 @@
|
||||
-- Test file for StarRocks External Catalog MySQL Compatibility
|
||||
-- This test simulates the exact queries the StarRocks JDBC connector sends
|
||||
-- Reference: MysqlSchemaResolver.java in StarRocks
-- Setup: Create test table with partitions
CREATE TABLE test_partitions (
    ts TIMESTAMP TIME INDEX,
    host STRING PRIMARY KEY,
    val DOUBLE
) PARTITION ON COLUMNS (host) ();

Affected Rows: 0

INSERT INTO test_partitions VALUES
    ('2024-01-01 00:00:00', 'host1', 1.0),
    ('2024-01-01 00:00:00', 'host2', 2.0);

Affected Rows: 2

-- ============================================
-- Section 1: JDBC DatabaseMetaData API queries
-- ============================================
-- getCatalogs() -> SHOW DATABASES
SHOW DATABASES;

+--------------------+
| Database           |
+--------------------+
| greptime_private   |
| information_schema |
| public             |
+--------------------+

-- getTables(db, null, null, types) with backtick quoting
SHOW FULL TABLES FROM `public` LIKE '%';

+------------------+-----------------+
| Tables_in_public | Table_type      |
+------------------+-----------------+
| numbers          | LOCAL TEMPORARY |
| test_partitions  | BASE TABLE      |
+------------------+-----------------+

-- getColumns(db, null, tbl, "%") with backtick quoting
SHOW FULL COLUMNS FROM `test_partitions` FROM `public` LIKE '%';

+-------+--------------+-----------+------+------------+---------+---------+---------------+-------+----------------------+
| Field | Type         | Collation | Null | Key        | Default | Comment | Privileges    | Extra | Greptime_type        |
+-------+--------------+-----------+------+------------+---------+---------+---------------+-------+----------------------+
| host  | string       | utf8_bin  | Yes  | PRI        |         |         | select,insert |       | String               |
| ts    | timestamp(3) |           | No   | TIME INDEX |         |         | select,insert |       | TimestampMillisecond |
| val   | double       |           | Yes  |            |         |         | select,insert |       | Float64              |
+-------+--------------+-----------+------+------------+---------+---------+---------------+-------+----------------------+

-- ============================================
-- Section 2: INFORMATION_SCHEMA queries
-- ============================================
-- Schema listing (alternative to SHOW DATABASES)
SELECT catalog_name, schema_name FROM INFORMATION_SCHEMA.SCHEMATA
WHERE schema_name NOT IN ('information_schema', 'pg_catalog')
ORDER BY schema_name;

+--------------+------------------+
| catalog_name | schema_name      |
+--------------+------------------+
| greptime     | greptime_private |
| greptime     | public           |
+--------------+------------------+

-- Tables listing
SELECT table_catalog, table_schema, table_name, table_type
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions';

+---------------+--------------+-----------------+------------+
| table_catalog | table_schema | table_name      | table_type |
+---------------+--------------+-----------------+------------+
| greptime      | public       | test_partitions | BASE TABLE |
+---------------+--------------+-----------------+------------+

-- Columns listing
SELECT table_schema, table_name, column_name, data_type, is_nullable
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions'
ORDER BY ordinal_position;

+--------------+-----------------+-------------+--------------+-------------+
| table_schema | table_name      | column_name | data_type    | is_nullable |
+--------------+-----------------+-------------+--------------+-------------+
| public       | test_partitions | ts          | timestamp(3) | No          |
| public       | test_partitions | host        | string       | Yes         |
| public       | test_partitions | val         | double       | Yes         |
+--------------+-----------------+-------------+--------------+-------------+

-- ============================================
-- Section 3: StarRocks Partition Queries
-- These are the specific queries StarRocks sends for partition metadata
-- ============================================
-- List partition names (what StarRocks uses for partition identification)
SELECT PARTITION_DESCRIPTION as NAME
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions'
AND PARTITION_NAME IS NOT NULL
AND (PARTITION_METHOD = 'RANGE' or PARTITION_METHOD = 'RANGE COLUMNS')
ORDER BY PARTITION_DESCRIPTION;

+------+
| name |
+------+
|      |
+------+

-- Get partition columns (StarRocks uses this to identify partition key)
SELECT DISTINCT PARTITION_EXPRESSION
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions'
AND PARTITION_NAME IS NOT NULL
AND (PARTITION_METHOD = 'RANGE' or PARTITION_METHOD = 'RANGE COLUMNS')
AND PARTITION_EXPRESSION IS NOT NULL;

+----------------------+
| partition_expression |
+----------------------+
| host                 |
+----------------------+

-- Get partitions with modification time (uses IF() function for NULL handling)
-- StarRocks uses this for cache invalidation
-- SQLNESS REPLACE (\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}) DATETIME
SELECT PARTITION_NAME,
       IF(UPDATE_TIME IS NULL, CREATE_TIME, UPDATE_TIME) AS MODIFIED_TIME
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions'
AND PARTITION_NAME IS NOT NULL
ORDER BY PARTITION_NAME;

+----------------+---------------------+
| partition_name | modified_time       |
+----------------+---------------------+
| p0             | DATETIME            |
+----------------+---------------------+

-- Get table modification time (for non-partitioned tables, StarRocks uses this)
-- SQLNESS REPLACE (\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}) DATETIME
SELECT TABLE_NAME AS NAME,
       IF(UPDATE_TIME IS NULL, CREATE_TIME, UPDATE_TIME) AS MODIFIED_TIME
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions';

+-----------------+---------------------+
| name            | modified_time       |
+-----------------+---------------------+
| test_partitions | DATETIME            |
+-----------------+---------------------+

-- ============================================
-- Section 4: Raw PARTITIONS data inspection
-- Verify GreptimeDB returns appropriate partition metadata
-- ============================================
-- Show what GreptimeDB returns for PARTITIONS
-- SQLNESS REPLACE (\d{13,}) REGION_ID
SELECT table_schema, table_name, partition_name, partition_method,
       partition_expression, partition_description, greptime_partition_id
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions';

+--------------+-----------------+----------------+------------------+----------------------+-----------------------+-----------------------+
| table_schema | table_name      | partition_name | partition_method | partition_expression | partition_description | greptime_partition_id |
+--------------+-----------------+----------------+------------------+----------------------+-----------------------+-----------------------+
| public       | test_partitions | p0             | RANGE            | host                 |                       | REGION_ID             |
+--------------+-----------------+----------------+------------------+----------------------+-----------------------+-----------------------+

-- ============================================
-- Section 5: IF() function tests with timestamps
-- StarRocks heavily uses IF() for NULL timestamp handling
-- ============================================
SELECT IF(1, 'yes', 'no') as result;

+--------+
| result |
+--------+
| yes    |
+--------+

SELECT IF(0, 'yes', 'no') as result;

+--------+
| result |
+--------+
| no     |
+--------+

SELECT IF(NULL, 'yes', 'no') as result;

+--------+
| result |
+--------+
| no     |
+--------+

-- Cleanup
DROP TABLE test_partitions;

Affected Rows: 0

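For context, these are the metadata queries StarRocks' MysqlSchemaResolver issues once a JDBC external catalog points at GreptimeDB's MySQL endpoint. A minimal sketch of that StarRocks-side setup is shown below; the catalog name, credentials, driver location, and the default GreptimeDB MySQL port 4002 are assumptions for illustration, not something this change ships or tests.

-- Hypothetical StarRocks-side setup (illustrative sketch only)
CREATE EXTERNAL CATALOG greptime_jdbc
PROPERTIES (
    "type" = "jdbc",
    "user" = "greptime_user",
    "password" = "greptime_pwd",
    "jdbc_uri" = "jdbc:mysql://127.0.0.1:4002",
    "driver_url" = "file:///path/to/mysql-connector-j.jar",
    "driver_class" = "com.mysql.cj.jdbc.Driver"
);
-- After this, StarRocks issues SHOW DATABASES, SHOW FULL TABLES/COLUMNS, and the
-- INFORMATION_SCHEMA.PARTITIONS queries exercised in the expected results above.
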
111
tests/cases/standalone/common/system/starrocks_compatibility.sql
Normal file
@@ -0,0 +1,111 @@
-- Test file for StarRocks External Catalog MySQL Compatibility
-- This test simulates the exact queries StarRocks JDBC connector sends
-- Reference: MysqlSchemaResolver.java in StarRocks

-- Setup: Create test table with partitions
CREATE TABLE test_partitions (
    ts TIMESTAMP TIME INDEX,
    host STRING PRIMARY KEY,
    val DOUBLE
) PARTITION ON COLUMNS (host) ();

INSERT INTO test_partitions VALUES
    ('2024-01-01 00:00:00', 'host1', 1.0),
    ('2024-01-01 00:00:00', 'host2', 2.0);

-- ============================================
-- Section 1: JDBC DatabaseMetaData API queries
-- ============================================

-- getCatalogs() -> SHOW DATABASES
SHOW DATABASES;

-- getTables(db, null, null, types) with backtick quoting
SHOW FULL TABLES FROM `public` LIKE '%';

-- getColumns(db, null, tbl, "%") with backtick quoting
SHOW FULL COLUMNS FROM `test_partitions` FROM `public` LIKE '%';

-- ============================================
-- Section 2: INFORMATION_SCHEMA queries
-- ============================================

-- Schema listing (alternative to SHOW DATABASES)
SELECT catalog_name, schema_name FROM INFORMATION_SCHEMA.SCHEMATA
WHERE schema_name NOT IN ('information_schema', 'pg_catalog')
ORDER BY schema_name;

-- Tables listing
SELECT table_catalog, table_schema, table_name, table_type
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions';

-- Columns listing
SELECT table_schema, table_name, column_name, data_type, is_nullable
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions'
ORDER BY ordinal_position;

-- ============================================
-- Section 3: StarRocks Partition Queries
-- These are the specific queries StarRocks sends for partition metadata
-- ============================================

-- List partition names (what StarRocks uses for partition identification)
SELECT PARTITION_DESCRIPTION as NAME
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions'
AND PARTITION_NAME IS NOT NULL
AND (PARTITION_METHOD = 'RANGE' or PARTITION_METHOD = 'RANGE COLUMNS')
ORDER BY PARTITION_DESCRIPTION;

-- Get partition columns (StarRocks uses this to identify partition key)
SELECT DISTINCT PARTITION_EXPRESSION
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions'
AND PARTITION_NAME IS NOT NULL
AND (PARTITION_METHOD = 'RANGE' or PARTITION_METHOD = 'RANGE COLUMNS')
AND PARTITION_EXPRESSION IS NOT NULL;

-- Get partitions with modification time (uses IF() function for NULL handling)
-- StarRocks uses this for cache invalidation
-- SQLNESS REPLACE (\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}) DATETIME
SELECT PARTITION_NAME,
       IF(UPDATE_TIME IS NULL, CREATE_TIME, UPDATE_TIME) AS MODIFIED_TIME
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions'
AND PARTITION_NAME IS NOT NULL
ORDER BY PARTITION_NAME;

-- Get table modification time (for non-partitioned tables, StarRocks uses this)
-- SQLNESS REPLACE (\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}) DATETIME
SELECT TABLE_NAME AS NAME,
       IF(UPDATE_TIME IS NULL, CREATE_TIME, UPDATE_TIME) AS MODIFIED_TIME
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions';

-- ============================================
-- Section 4: Raw PARTITIONS data inspection
-- Verify GreptimeDB returns appropriate partition metadata
-- ============================================

-- Show what GreptimeDB returns for PARTITIONS
-- SQLNESS REPLACE (\d{13,}) REGION_ID
SELECT table_schema, table_name, partition_name, partition_method,
       partition_expression, partition_description, greptime_partition_id
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions';

-- ============================================
-- Section 5: IF() function tests with timestamps
-- StarRocks heavily uses IF() for NULL timestamp handling
-- ============================================

SELECT IF(1, 'yes', 'no') as result;

SELECT IF(0, 'yes', 'no') as result;

SELECT IF(NULL, 'yes', 'no') as result;

-- Cleanup
DROP TABLE test_partitions;

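The IF(UPDATE_TIME IS NULL, CREATE_TIME, UPDATE_TIME) expression above is a MySQL-style null fallback. Assuming GreptimeDB evaluates IF() with MySQL semantics, the standard COALESCE form should return the same MODIFIED_TIME, which makes it a handy manual cross-check; the query below is a sketch and not part of the committed test.

-- Equivalent null fallback written with COALESCE (illustrative cross-check)
SELECT TABLE_NAME AS NAME,
       COALESCE(UPDATE_TIME, CREATE_TIME) AS MODIFIED_TIME
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE TABLE_SCHEMA = 'public' AND TABLE_NAME = 'test_partitions';
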
@@ -45,23 +45,23 @@ Affected Rows: 0

SHOW TABLES;

+------------+
| Tables     |
+------------+
| numbers    |
| test_table |
| test_view  |
+------------+
+------------------+
| Tables_in_public |
+------------------+
| numbers          |
| test_table       |
| test_view        |
+------------------+

SHOW FULL TABLES;

+------------+-----------------+
| Tables     | Table_type      |
+------------+-----------------+
| numbers    | LOCAL TEMPORARY |
| test_table | BASE TABLE      |
| test_view  | VIEW            |
+------------+-----------------+
+------------------+-----------------+
| Tables_in_public | Table_type      |
+------------------+-----------------+
| numbers          | LOCAL TEMPORARY |
| test_table       | BASE TABLE      |
| test_view        | VIEW            |
+------------------+-----------------+

-- psql: \dv
SELECT n.nspname as "Schema",
@@ -182,11 +182,11 @@ Error: 4001(TableNotFound), Failed to plan SQL: Table not found: greptime.public

SHOW TABLES;

+---------+
| Tables  |
+---------+
| numbers |
+---------+
+------------------+
| Tables_in_public |
+------------------+
| numbers          |
+------------------+

-- psql: \dv
SELECT n.nspname as "Schema",

@@ -84,11 +84,11 @@ Affected Rows: 0

SHOW TABLES;

+---------+
| Tables  |
+---------+
| numbers |
+---------+
+------------------+
| Tables_in_public |
+------------------+
| numbers          |
+------------------+

SHOW VIEWS;


@@ -17,11 +17,11 @@ Affected Rows: 0
-- SQLNESS ARG restart=true
show tables;

+---------+
| Tables  |
+---------+
| numbers |
+---------+
+------------------+
| Tables_in_public |
+------------------+
| numbers          |
+------------------+

create table t3 (c timestamp time index);


@@ -113,7 +113,7 @@ Error: 3001(EngineExecuteQuery), Invalid query: TQL query only supports one f64
SHOW TABLES;

+------------------------+
| Tables                 |
| Tables_in_public       |
+------------------------+
| http_requests_two_vals |
| numbers                |

@@ -26,14 +26,14 @@ Affected Rows: 1
-- SQLNESS ARG version=latest
SHOW TABLES;

+---------------+
| Tables        |
+---------------+
| numbers       |
| test_ttl_0s   |
| test_ttl_1s   |
| test_ttl_none |
+---------------+
+------------------+
| Tables_in_public |
+------------------+
| numbers          |
| test_ttl_0s      |
| test_ttl_1s      |
| test_ttl_none    |
+------------------+

SHOW CREATE TABLE test_ttl_1s;

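The hunks above change the expected SHOW TABLES / SHOW FULL TABLES header from Tables to Tables_in_public, which follows MySQL's convention of naming the column Tables_in_<database> for the schema being listed. A small illustrative check, assuming a MySQL-protocol session with public as the current database (not part of the committed tests):

SHOW TABLES;                 -- header is now Tables_in_public
SHOW TABLES FROM `public`;   -- same header, with the database named explicitly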