Compare commits


24 Commits

Author SHA1 Message Date
shuiyisong
59ddfa84ec fix: check and clippy
Signed-off-by: shuiyisong <xixing.sys@gmail.com>
2025-11-26 18:35:21 +08:00
evenyag
dd043eadc4 feat: add file_scan_cost
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
7e6af2c7ee feat: collect the whole fetch time
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
87d3b17f4d feat: update parquet fetch metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
5acac3d403 chore: fix compiler errors
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
f9c66ba0de feat: implement debug for new metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
37847a8df6 feat: debug print metrics in ScanMetricsSet
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
6e06ac9e5c feat: init verbose metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
09effc8128 feat: add fetch metrics to ReaderMetrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
c14728e3ae feat: collect more metrics for memory row group
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
cce4d56e00 feat: add apply metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
69cf13b33a feat: add parquet metadata metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
c83a282b39 feat: collect parquet row group metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
5329efcdba feat: collect fulltext dir metrics for applier
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
50b5c90d53 feat: collect read metrics in appliers
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
fea2966dec feat: collect cache metrics for inverted and bloom index
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
e00452c4db feat: collect metadata fetch metrics for inverted index
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
7a31b2a8ea refactor: rename elapsed to fetch_elapsed
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
f363d73f72 feat: add metrics for range_read and metadata
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
7a6befcad3 feat: collect read metrics for inverted index
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
d6c75ec55f feat: implement BloomFilterReadMetrics for BloomFilterReader
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
5b8f1d819f feat: add metrics to fulltext index applier
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
b68286e8af feat: add metrics to bloom applier
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
evenyag
4519607bc6 feat: add inverted applier metrics
Signed-off-by: evenyag <realevenyag@gmail.com>
2025-11-26 18:20:06 +08:00
361 changed files with 4321 additions and 20775 deletions

View File

@@ -49,9 +49,14 @@ on:
description: Do not run integration tests during the build
type: boolean
default: true
build_linux_artifacts:
build_linux_amd64_artifacts:
type: boolean
description: Build linux artifacts (both amd64 and arm64)
description: Build linux-amd64 artifacts
required: false
default: false
build_linux_arm64_artifacts:
type: boolean
description: Build linux-arm64 artifacts
required: false
default: false
build_macos_artifacts:
@@ -139,7 +144,7 @@ jobs:
./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"
- name: Allocate linux-amd64 runner
if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-amd64-runner
with:
@@ -153,7 +158,7 @@ jobs:
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
- name: Allocate linux-arm64 runner
if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-arm64-runner
with:
@@ -168,7 +173,7 @@ jobs:
build-linux-amd64-artifacts:
name: Build linux-amd64 artifacts
if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
@@ -190,7 +195,7 @@ jobs:
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
@@ -212,7 +217,7 @@ jobs:
run-multi-lang-tests:
name: Run Multi-language SDK Tests
if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
@@ -381,18 +386,7 @@ jobs:
publish-github-release:
name: Create GitHub release and upload artifacts
# Use always() to run even when optional jobs (macos, windows) are skipped.
# Then check that required jobs succeeded and optional jobs didn't fail.
if: |
always() &&
(inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule') &&
needs.allocate-runners.result == 'success' &&
(needs.build-linux-amd64-artifacts.result == 'success' || needs.build-linux-amd64-artifacts.result == 'skipped') &&
(needs.build-linux-arm64-artifacts.result == 'success' || needs.build-linux-arm64-artifacts.result == 'skipped') &&
(needs.build-macos-artifacts.result == 'success' || needs.build-macos-artifacts.result == 'skipped') &&
(needs.build-windows-artifacts.result == 'success' || needs.build-windows-artifacts.result == 'skipped') &&
(needs.release-images-to-dockerhub.result == 'success' || needs.release-images-to-dockerhub.result == 'skipped') &&
(needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped')
if: ${{ inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [ # The job have to wait for all the artifacts are built.
allocate-runners,
build-linux-amd64-artifacts,

View File

@@ -2,41 +2,41 @@
## Individual Committers (in alphabetical order)
- [apdong2022](https://github.com/apdong2022)
- [beryl678](https://github.com/beryl678)
- [CookiePieWw](https://github.com/CookiePieWw)
- [etolbakov](https://github.com/etolbakov)
- [irenjj](https://github.com/irenjj)
- [KKould](https://github.com/KKould)
- [Lanqing Yang](https://github.com/lyang24)
- [nicecui](https://github.com/nicecui)
- [NiwakaDev](https://github.com/NiwakaDev)
- [paomian](https://github.com/paomian)
- [tisonkun](https://github.com/tisonkun)
- [Wenjie0329](https://github.com/Wenjie0329)
- [zhaoyingnan01](https://github.com/zhaoyingnan01)
- [zhongzc](https://github.com/zhongzc)
- [ZonaHex](https://github.com/ZonaHex)
- [zyy17](https://github.com/zyy17)
* [CookiePieWw](https://github.com/CookiePieWw)
* [etolbakov](https://github.com/etolbakov)
* [irenjj](https://github.com/irenjj)
* [KKould](https://github.com/KKould)
* [Lanqing Yang](https://github.com/lyang24)
* [NiwakaDev](https://github.com/NiwakaDev)
* [tisonkun](https://github.com/tisonkun)
## Team Members (in alphabetical order)
- [daviderli614](https://github.com/daviderli614)
- [discord9](https://github.com/discord9)
- [evenyag](https://github.com/evenyag)
- [fengjiachun](https://github.com/fengjiachun)
- [fengys1996](https://github.com/fengys1996)
- [GrepTime](https://github.com/GrepTime)
- [holalengyu](https://github.com/holalengyu)
- [killme2008](https://github.com/killme2008)
- [MichaelScofield](https://github.com/MichaelScofield)
- [shuiyisong](https://github.com/shuiyisong)
- [sunchanglong](https://github.com/sunchanglong)
- [sunng87](https://github.com/sunng87)
- [v0y4g3r](https://github.com/v0y4g3r)
- [waynexia](https://github.com/waynexia)
- [WenyXu](https://github.com/WenyXu)
- [xtang](https://github.com/xtang)
* [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678)
* [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag)
* [fengjiachun](https://github.com/fengjiachun)
* [fengys1996](https://github.com/fengys1996)
* [GrepTime](https://github.com/GrepTime)
* [holalengyu](https://github.com/holalengyu)
* [killme2008](https://github.com/killme2008)
* [MichaelScofield](https://github.com/MichaelScofield)
* [nicecui](https://github.com/nicecui)
* [paomian](https://github.com/paomian)
* [shuiyisong](https://github.com/shuiyisong)
* [sunchanglong](https://github.com/sunchanglong)
* [sunng87](https://github.com/sunng87)
* [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [xtang](https://github.com/xtang)
* [zhaoyingnan01](https://github.com/zhaoyingnan01)
* [zhongzc](https://github.com/zhongzc)
* [ZonaHex](https://github.com/ZonaHex)
* [zyy17](https://github.com/zyy17)
## All Contributors

Cargo.lock generated
View File

@@ -738,12 +738,12 @@ dependencies = [
"api",
"async-trait",
"common-base",
"common-config",
"common-error",
"common-macro",
"common-telemetry",
"common-test-util",
"digest",
"notify",
"sha1",
"snafu 0.8.6",
"sql",
@@ -2055,7 +2055,6 @@ dependencies = [
"datanode",
"humantime-serde",
"meta-client",
"notify",
"object-store",
"serde",
"serde_json",
@@ -2254,7 +2253,6 @@ dependencies = [
"arrow-flight",
"bytes",
"common-base",
"common-config",
"common-error",
"common-macro",
"common-recordbatch",
@@ -2268,6 +2266,7 @@ dependencies = [
"hyper 1.6.0",
"hyper-util",
"lazy_static",
"notify",
"prost 0.13.5",
"rand 0.9.1",
"serde",
@@ -2846,15 +2845,6 @@ dependencies = [
"unicode-segmentation",
]
[[package]]
name = "convert_case"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9"
dependencies = [
"unicode-segmentation",
]
[[package]]
name = "core-foundation"
version = "0.9.4"
@@ -3751,9 +3741,9 @@ dependencies = [
[[package]]
name = "datafusion-pg-catalog"
version = "0.12.3"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09bfd1feed7ed335227af0b65955ed825e467cf67fad6ecd089123202024cfd1"
checksum = "15824c98ff2009c23b0398d441499b147f7c5ac0e5ee993e7a473d79040e3626"
dependencies = [
"async-trait",
"datafusion",
@@ -4194,23 +4184,21 @@ dependencies = [
[[package]]
name = "derive_more"
version = "2.1.0"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618"
checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05"
dependencies = [
"derive_more-impl",
]
[[package]]
name = "derive_more-impl"
version = "2.1.0"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b"
checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22"
dependencies = [
"convert_case 0.10.0",
"proc-macro2",
"quote",
"rustc_version",
"syn 2.0.106",
"unicode-xid",
]
@@ -4927,7 +4915,6 @@ dependencies = [
"async-stream",
"async-trait",
"auth",
"axum 0.8.4",
"bytes",
"cache",
"catalog",
@@ -4962,11 +4949,9 @@ dependencies = [
"hostname 0.4.1",
"humantime",
"humantime-serde",
"hyper-util",
"lazy_static",
"log-query",
"meta-client",
"meta-srv",
"num_cpus",
"opentelemetry-proto",
"operator",
@@ -4978,7 +4963,6 @@ dependencies = [
"prost 0.13.5",
"query",
"rand 0.9.1",
"reqwest",
"serde",
"serde_json",
"servers",
@@ -5367,7 +5351,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=0423fa30203187c75e2937a668df1da699c8b96c#0423fa30203187c75e2937a668df1da699c8b96c"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=0df99f09f1d6785055b2d9da96fc4ecc2bdf6803#0df99f09f1d6785055b2d9da96fc4ecc2bdf6803"
dependencies = [
"prost 0.13.5",
"prost-types 0.13.5",
@@ -7461,7 +7445,6 @@ dependencies = [
"once_cell",
"ordered-float 4.6.0",
"parking_lot 0.12.4",
"partition",
"prometheus",
"prost 0.13.5",
"rand 0.9.1",
@@ -7530,11 +7513,9 @@ dependencies = [
"common-test-util",
"common-time",
"common-wal",
"criterion 0.4.0",
"datafusion",
"datatypes",
"futures-util",
"fxhash",
"humantime-serde",
"itertools 0.14.0",
"lazy_static",
@@ -8380,7 +8361,6 @@ dependencies = [
"common-macro",
"common-telemetry",
"common-test-util",
"derive_builder 0.20.2",
"futures",
"humantime-serde",
"lazy_static",
@@ -9219,9 +9199,9 @@ dependencies = [
[[package]]
name = "pgwire"
version = "0.36.3"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70a2bcdcc4b20a88e0648778ecf00415bbd5b447742275439c22176835056f99"
checksum = "d331bb0eef5bc83a221c0a85b1f205bccf094d4f72a26ae1d68a1b1c535123b7"
dependencies = [
"async-trait",
"base64 0.22.1",
@@ -9519,7 +9499,6 @@ name = "plugins"
version = "1.0.0-beta.2"
dependencies = [
"auth",
"catalog",
"clap 4.5.40",
"cli",
"common-base",
@@ -9528,7 +9507,6 @@ dependencies = [
"datanode",
"flow",
"frontend",
"meta-client",
"meta-srv",
"serde",
"snafu 0.8.6",
@@ -10853,7 +10831,7 @@ dependencies = [
[[package]]
name = "rskafka"
version = "0.6.0"
source = "git+https://github.com/GreptimeTeam/rskafka.git?rev=f5688f83e7da591cda3f2674c2408b4c0ed4ed50#f5688f83e7da591cda3f2674c2408b4c0ed4ed50"
source = "git+https://github.com/WenyXu/rskafka.git?rev=7b0f31ed39db049b4ee2e5f1e95b5a30be9baf76#7b0f31ed39db049b4ee2e5f1e95b5a30be9baf76"
dependencies = [
"bytes",
"chrono",
@@ -13085,7 +13063,6 @@ dependencies = [
"loki-proto",
"meta-client",
"meta-srv",
"mito2",
"moka",
"mysql_async",
"object-store",

View File

@@ -131,7 +131,7 @@ datafusion-functions = "50"
datafusion-functions-aggregate-common = "50"
datafusion-optimizer = "50"
datafusion-orc = "0.5"
datafusion-pg-catalog = "0.12.3"
datafusion-pg-catalog = "0.12.1"
datafusion-physical-expr = "50"
datafusion-physical-plan = "50"
datafusion-sql = "50"
@@ -139,7 +139,6 @@ datafusion-substrait = "50"
deadpool = "0.12"
deadpool-postgres = "0.14"
derive_builder = "0.20"
derive_more = { version = "2.1", features = ["full"] }
dotenv = "0.15"
either = "1.15"
etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62df834f0cffda355eba96691fe1a9a332b75a7", features = [
@@ -149,7 +148,7 @@ etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62d
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0423fa30203187c75e2937a668df1da699c8b96c" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0df99f09f1d6785055b2d9da96fc4ecc2bdf6803" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -201,8 +200,7 @@ reqwest = { version = "0.12", default-features = false, features = [
"stream",
"multipart",
] }
# Branch: feat/request-timeout
rskafka = { git = "https://github.com/GreptimeTeam/rskafka.git", rev = "f5688f83e7da591cda3f2674c2408b4c0ed4ed50", features = [
rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "7b0f31ed39db049b4ee2e5f1e95b5a30be9baf76", features = [
"transport-tls",
] }
rstest = "0.25"

View File

@@ -294,6 +294,7 @@
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
@@ -456,6 +457,7 @@
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
@@ -627,6 +629,7 @@
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |

View File

@@ -99,6 +99,9 @@ metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"

View File

@@ -78,6 +78,9 @@ metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"

View File

@@ -226,6 +226,9 @@ metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"
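
Taken together, the documentation table and the example-config hunks above introduce a `heartbeat_timeout` option under `meta_client`. A minimal sketch of the resulting `[meta_client]` section, using the default values listed in the table (the metasrv address is the illustrative one from the example configs, not a required value):

```toml
[meta_client]
## The addresses of the metasrv (illustrative value from the example configs).
metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"
## Heartbeat timeout added by these hunks.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"
## Connect server timeout.
connect_timeout = "1s"
## TCP_NODELAY option for accepted connections.
tcp_nodelay = true
```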

flake.lock generated
View File

@@ -8,11 +8,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1765252472,
"narHash": "sha256-byMt/uMi7DJ8tRniFopDFZMO3leSjGp6GS4zWOFT+uQ=",
"lastModified": 1760078406,
"narHash": "sha256-JeJK0ZA845PtkCHkfo4KjeI1mYrsr2s3cxBYKhF4BoE=",
"owner": "nix-community",
"repo": "fenix",
"rev": "8456b985f6652e3eef0632ee9992b439735c5544",
"rev": "351277c60d104944122ee389cdf581c5ce2c6732",
"type": "github"
},
"original": {
@@ -41,16 +41,16 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1764983851,
"narHash": "sha256-y7RPKl/jJ/KAP/VKLMghMgXTlvNIJMHKskl8/Uuar7o=",
"lastModified": 1759994382,
"narHash": "sha256-wSK+3UkalDZRVHGCRikZ//CyZUJWDJkBDTQX1+G77Ow=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "d9bc5c7dceb30d8d6fafa10aeb6aa8a48c218454",
"rev": "5da4a26309e796daa7ffca72df93dbe53b8164c7",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-25.11",
"ref": "nixos-25.05",
"repo": "nixpkgs",
"type": "github"
}
@@ -65,11 +65,11 @@
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1765120009,
"narHash": "sha256-nG76b87rkaDzibWbnB5bYDm6a52b78A+fpm+03pqYIw=",
"lastModified": 1760014945,
"narHash": "sha256-ySdl7F9+oeWNHVrg3QL/brazqmJvYFEdpGnF3pyoDH8=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "5e3e9c4e61bba8a5e72134b9ffefbef8f531d008",
"rev": "90d2e1ce4dfe7dc49250a8b88a0f08ffdb9cb23f",
"type": "github"
},
"original": {

View File

@@ -2,7 +2,7 @@
description = "Development environment flake";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.11";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
@@ -48,7 +48,7 @@
gnuplot ## for cargo bench
];
buildInputs = buildInputs;
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
NIX_HARDENING_ENABLE = "";
};
});

View File

@@ -23,9 +23,11 @@ use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth
use datatypes::json::value::{JsonNumber, JsonValue, JsonValueRef, JsonVariant};
use datatypes::prelude::{ConcreteDataType, ValueRef};
use datatypes::types::{
IntervalType, JsonFormat, JsonType, StructField, StructType, TimeType, TimestampType,
IntervalType, JsonFormat, StructField, StructType, TimeType, TimestampType,
};
use datatypes::value::{
ListValue, ListValueRef, OrderedF32, OrderedF64, StructValue, StructValueRef, Value,
};
use datatypes::value::{ListValueRef, OrderedF32, OrderedF64, StructValueRef, Value};
use datatypes::vectors::VectorRef;
use greptime_proto::v1::column_data_type_extension::TypeExt;
use greptime_proto::v1::ddl_request::Expr;
@@ -80,10 +82,6 @@ impl ColumnDataTypeWrapper {
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
(self.datatype, self.datatype_ext.clone())
}
pub fn into_parts(self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
(self.datatype, self.datatype_ext)
}
}
impl From<ColumnDataTypeWrapper> for ConcreteDataType {
@@ -129,7 +127,6 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
};
ConcreteDataType::json_native_datatype(inner_type.into())
}
None => ConcreteDataType::Json(JsonType::null()),
_ => {
// invalid state, type extension is missing or invalid
ConcreteDataType::null_datatype()
@@ -444,22 +441,18 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
JsonFormat::Jsonb => Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
}),
JsonFormat::Native(native_type) => {
if native_type.is_null() {
None
} else {
let native_type = ConcreteDataType::from(native_type.as_ref());
let (datatype, datatype_extension) =
ColumnDataTypeWrapper::try_from(native_type)?.into_parts();
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonNativeType(Box::new(
JsonNativeTypeExtension {
datatype: datatype as i32,
datatype_extension: datatype_extension.map(Box::new),
},
))),
})
}
JsonFormat::Native(inner) => {
let inner_type = ColumnDataTypeWrapper::try_from(
ConcreteDataType::from(inner.as_ref()),
)?;
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonNativeType(Box::new(
JsonNativeTypeExtension {
datatype: inner_type.datatype.into(),
datatype_extension: inner_type.datatype_ext.map(Box::new),
},
))),
})
}
}
} else {
@@ -708,7 +701,6 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
Some(Expr::CreateView(_)) => "ddl.create_view",
Some(Expr::DropView(_)) => "ddl.drop_view",
Some(Expr::AlterDatabase(_)) => "ddl.alter_database",
Some(Expr::CommentOn(_)) => "ddl.comment_on",
None => "ddl.empty",
}
}
@@ -895,6 +887,111 @@ pub fn is_column_type_value_eq(
.unwrap_or(false)
}
/// Convert value into proto's value.
pub fn to_proto_value(value: Value) -> v1::Value {
match value {
Value::Null => v1::Value { value_data: None },
Value::Boolean(v) => v1::Value {
value_data: Some(ValueData::BoolValue(v)),
},
Value::UInt8(v) => v1::Value {
value_data: Some(ValueData::U8Value(v.into())),
},
Value::UInt16(v) => v1::Value {
value_data: Some(ValueData::U16Value(v.into())),
},
Value::UInt32(v) => v1::Value {
value_data: Some(ValueData::U32Value(v)),
},
Value::UInt64(v) => v1::Value {
value_data: Some(ValueData::U64Value(v)),
},
Value::Int8(v) => v1::Value {
value_data: Some(ValueData::I8Value(v.into())),
},
Value::Int16(v) => v1::Value {
value_data: Some(ValueData::I16Value(v.into())),
},
Value::Int32(v) => v1::Value {
value_data: Some(ValueData::I32Value(v)),
},
Value::Int64(v) => v1::Value {
value_data: Some(ValueData::I64Value(v)),
},
Value::Float32(v) => v1::Value {
value_data: Some(ValueData::F32Value(*v)),
},
Value::Float64(v) => v1::Value {
value_data: Some(ValueData::F64Value(*v)),
},
Value::String(v) => v1::Value {
value_data: Some(ValueData::StringValue(v.as_utf8().to_string())),
},
Value::Binary(v) => v1::Value {
value_data: Some(ValueData::BinaryValue(v.to_vec())),
},
Value::Date(v) => v1::Value {
value_data: Some(ValueData::DateValue(v.val())),
},
Value::Timestamp(v) => match v.unit() {
TimeUnit::Second => v1::Value {
value_data: Some(ValueData::TimestampSecondValue(v.value())),
},
TimeUnit::Millisecond => v1::Value {
value_data: Some(ValueData::TimestampMillisecondValue(v.value())),
},
TimeUnit::Microsecond => v1::Value {
value_data: Some(ValueData::TimestampMicrosecondValue(v.value())),
},
TimeUnit::Nanosecond => v1::Value {
value_data: Some(ValueData::TimestampNanosecondValue(v.value())),
},
},
Value::Time(v) => match v.unit() {
TimeUnit::Second => v1::Value {
value_data: Some(ValueData::TimeSecondValue(v.value())),
},
TimeUnit::Millisecond => v1::Value {
value_data: Some(ValueData::TimeMillisecondValue(v.value())),
},
TimeUnit::Microsecond => v1::Value {
value_data: Some(ValueData::TimeMicrosecondValue(v.value())),
},
TimeUnit::Nanosecond => v1::Value {
value_data: Some(ValueData::TimeNanosecondValue(v.value())),
},
},
Value::IntervalYearMonth(v) => v1::Value {
value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
},
Value::IntervalDayTime(v) => v1::Value {
value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
},
Value::IntervalMonthDayNano(v) => v1::Value {
value_data: Some(ValueData::IntervalMonthDayNanoValue(
convert_month_day_nano_to_pb(v),
)),
},
Value::Decimal128(v) => v1::Value {
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
},
Value::List(list_value) => v1::Value {
value_data: Some(ValueData::ListValue(v1::ListValue {
items: convert_list_to_pb_values(list_value),
})),
},
Value::Struct(struct_value) => v1::Value {
value_data: Some(ValueData::StructValue(v1::StructValue {
items: convert_struct_to_pb_values(struct_value),
})),
},
Value::Json(v) => v1::Value {
value_data: Some(ValueData::JsonValue(encode_json_value(*v))),
},
Value::Duration(_) => v1::Value { value_data: None },
}
}
fn encode_json_value(value: JsonValue) -> v1::JsonValue {
fn helper(json: JsonVariant) -> v1::JsonValue {
let value = match json {
@@ -955,6 +1052,22 @@ fn decode_json_value(value: &v1::JsonValue) -> JsonValueRef<'_> {
}
}
fn convert_list_to_pb_values(list_value: ListValue) -> Vec<v1::Value> {
list_value
.take_items()
.into_iter()
.map(to_proto_value)
.collect()
}
fn convert_struct_to_pb_values(struct_value: StructValue) -> Vec<v1::Value> {
struct_value
.take_items()
.into_iter()
.map(to_proto_value)
.collect()
}
/// Returns the [ColumnDataTypeWrapper] of the value.
///
/// If value is null, returns `None`.
@@ -1001,14 +1114,14 @@ pub fn vectors_to_rows<'a>(
let mut rows = vec![Row { values: vec![] }; row_count];
for column in columns {
for (row_index, row) in rows.iter_mut().enumerate() {
row.values.push(to_grpc_value(column.get(row_index)))
row.values.push(value_to_grpc_value(column.get(row_index)))
}
}
rows
}
pub fn to_grpc_value(value: Value) -> GrpcValue {
pub fn value_to_grpc_value(value: Value) -> GrpcValue {
GrpcValue {
value_data: match value {
Value::Null => None,
@@ -1048,7 +1161,7 @@ pub fn to_grpc_value(value: Value) -> GrpcValue {
let items = list_value
.take_items()
.into_iter()
.map(to_grpc_value)
.map(value_to_grpc_value)
.collect();
Some(ValueData::ListValue(v1::ListValue { items }))
}
@@ -1056,7 +1169,7 @@ pub fn to_grpc_value(value: Value) -> GrpcValue {
let items = struct_value
.take_items()
.into_iter()
.map(to_grpc_value)
.map(value_to_grpc_value)
.collect();
Some(ValueData::StructValue(v1::StructValue { items }))
}
@@ -1156,7 +1269,6 @@ mod tests {
use common_time::interval::IntervalUnit;
use datatypes::scalars::ScalarVector;
use datatypes::types::{Int8Type, Int32Type, UInt8Type, UInt32Type};
use datatypes::value::{ListValue, StructValue};
use datatypes::vectors::{
BooleanVector, DateVector, Float32Vector, PrimitiveVector, StringVector,
};
@@ -1760,7 +1872,7 @@ mod tests {
Arc::new(ConcreteDataType::boolean_datatype()),
));
let pb_value = to_grpc_value(value);
let pb_value = to_proto_value(value);
match pb_value.value_data.unwrap() {
ValueData::ListValue(pb_list_value) => {
@@ -1789,7 +1901,7 @@ mod tests {
.unwrap(),
);
let pb_value = to_grpc_value(value);
let pb_value = to_proto_value(value);
match pb_value.value_data.unwrap() {
ValueData::StructValue(pb_struct_value) => {

View File

@@ -15,11 +15,11 @@ workspace = true
api.workspace = true
async-trait.workspace = true
common-base.workspace = true
common-config.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
digest = "0.10"
notify.workspace = true
sha1 = "0.10"
snafu.workspace = true
sql.workspace = true

View File

@@ -75,12 +75,11 @@ pub enum Error {
username: String,
},
#[snafu(display("Failed to initialize a file watcher"))]
#[snafu(display("Failed to initialize a watcher for file {}", path))]
FileWatch {
path: String,
#[snafu(source)]
source: common_config::error::Error,
#[snafu(implicit)]
location: Location,
error: notify::Error,
},
#[snafu(display("User is not authorized to perform this action"))]

View File

@@ -12,14 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::Path;
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use async_trait::async_trait;
use common_config::file_watcher::{FileWatcherBuilder, FileWatcherConfig};
use common_telemetry::{info, warn};
use snafu::ResultExt;
use notify::{EventKind, RecursiveMode, Watcher};
use snafu::{ResultExt, ensure};
use crate::error::{FileWatchSnafu, Result};
use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
use crate::user_provider::{UserInfoMap, authenticate_with_credential, load_credential_from_file};
use crate::{Identity, Password, UserInfoRef, UserProvider};
@@ -39,36 +41,61 @@ impl WatchFileUserProvider {
pub fn new(filepath: &str) -> Result<Self> {
let credential = load_credential_from_file(filepath)?;
let users = Arc::new(Mutex::new(credential));
let this = WatchFileUserProvider {
users: users.clone(),
};
let users_clone = users.clone();
let filepath_owned = filepath.to_string();
let (tx, rx) = channel::<notify::Result<notify::Event>>();
let mut debouncer =
notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
let mut dir = Path::new(filepath).to_path_buf();
ensure!(
dir.pop(),
InvalidConfigSnafu {
value: filepath,
msg: "UserProvider path must be a file path",
}
);
debouncer
.watch(&dir, RecursiveMode::NonRecursive)
.context(FileWatchSnafu { path: filepath })?;
FileWatcherBuilder::new()
.watch_path(filepath)
.context(FileWatchSnafu)?
.config(FileWatcherConfig::new())
.spawn(move || match load_credential_from_file(&filepath_owned) {
Ok(credential) => {
let mut users = users_clone.lock().expect("users credential must be valid");
#[cfg(not(test))]
info!("User provider file {} reloaded", &filepath_owned);
#[cfg(test)]
info!(
"User provider file {} reloaded: {:?}",
&filepath_owned, credential
let filepath = filepath.to_string();
std::thread::spawn(move || {
let filename = Path::new(&filepath).file_name();
let _hold = debouncer;
while let Ok(res) = rx.recv() {
if let Ok(event) = res {
let is_this_file = event.paths.iter().any(|p| p.file_name() == filename);
let is_relevant_event = matches!(
event.kind,
EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
);
*users = credential;
if is_this_file && is_relevant_event {
info!(?event.kind, "User provider file {} changed", &filepath);
match load_credential_from_file(&filepath) {
Ok(credential) => {
let mut users =
users.lock().expect("users credential must be valid");
#[cfg(not(test))]
info!("User provider file {filepath} reloaded");
#[cfg(test)]
info!("User provider file {filepath} reloaded: {credential:?}");
*users = credential;
}
Err(err) => {
warn!(
?err,
"Fail to load credential from file {filepath}; keep the old one",
)
}
}
}
}
Err(err) => {
warn!(
?err,
"Fail to load credential from file {}; keep the old one", &filepath_owned
)
}
})
.context(FileWatchSnafu)?;
}
});
Ok(WatchFileUserProvider { users })
Ok(this)
}
}

View File

@@ -5,6 +5,7 @@ edition.workspace = true
license.workspace = true
[features]
enterprise = []
testing = []
[lints]

View File

@@ -12,14 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
mod builder;
mod client;
mod manager;
mod table_cache;
pub use builder::{
CatalogManagerConfigurator, CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder,
};
pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
pub use builder::KvBackendCatalogManagerBuilder;
pub use manager::KvBackendCatalogManager;
pub use table_cache::{TableCache, TableCacheRef, new_table_cache};

View File

@@ -12,11 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_error::ext::BoxedError;
use common_meta::cache::LayeredCacheRegistryRef;
use common_meta::key::TableMetadataManager;
use common_meta::key::flow::FlowMetadataManager;
@@ -25,34 +23,24 @@ use common_procedure::ProcedureManagerRef;
use moka::sync::Cache;
use partition::manager::PartitionRuleManager;
use crate::information_schema::{
InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
};
#[cfg(feature = "enterprise")]
use crate::information_schema::InformationSchemaTableFactoryRef;
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
use crate::kvbackend::KvBackendCatalogManager;
use crate::kvbackend::manager::{CATALOG_CACHE_MAX_CAPACITY, SystemCatalog};
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::numbers_table_provider::NumbersTableProvider;
use crate::system_schema::pg_catalog::PGCatalogProvider;
/// The configurator that customizes or enhances the [`KvBackendCatalogManagerBuilder`].
#[async_trait::async_trait]
pub trait CatalogManagerConfigurator<C>: Send + Sync {
async fn configure(
&self,
builder: KvBackendCatalogManagerBuilder,
ctx: C,
) -> std::result::Result<KvBackendCatalogManagerBuilder, BoxedError>;
}
pub type CatalogManagerConfiguratorRef<C> = Arc<dyn CatalogManagerConfigurator<C>>;
pub struct KvBackendCatalogManagerBuilder {
information_extension: InformationExtensionRef,
backend: KvBackendRef,
cache_registry: LayeredCacheRegistryRef,
procedure_manager: Option<ProcedureManagerRef>,
process_manager: Option<ProcessManagerRef>,
extra_information_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
#[cfg(feature = "enterprise")]
extra_information_table_factories:
std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
}
impl KvBackendCatalogManagerBuilder {
@@ -67,7 +55,8 @@ impl KvBackendCatalogManagerBuilder {
cache_registry,
procedure_manager: None,
process_manager: None,
extra_information_table_factories: HashMap::new(),
#[cfg(feature = "enterprise")]
extra_information_table_factories: std::collections::HashMap::new(),
}
}
@@ -82,9 +71,10 @@ impl KvBackendCatalogManagerBuilder {
}
/// Sets the extra information tables.
#[cfg(feature = "enterprise")]
pub fn with_extra_information_table_factories(
mut self,
factories: HashMap<String, InformationSchemaTableFactoryRef>,
factories: std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
) -> Self {
self.extra_information_table_factories = factories;
self
@@ -97,6 +87,7 @@ impl KvBackendCatalogManagerBuilder {
cache_registry,
procedure_manager,
process_manager,
#[cfg(feature = "enterprise")]
extra_information_table_factories,
} = self;
Arc::new_cyclic(|me| KvBackendCatalogManager {
@@ -120,6 +111,7 @@ impl KvBackendCatalogManagerBuilder {
process_manager.clone(),
backend.clone(),
);
#[cfg(feature = "enterprise")]
let provider = provider
.with_extra_table_factories(extra_information_table_factories.clone());
Arc::new(provider)
@@ -131,6 +123,7 @@ impl KvBackendCatalogManagerBuilder {
numbers_table_provider: NumbersTableProvider,
backend,
process_manager,
#[cfg(feature = "enterprise")]
extra_information_table_factories,
},
cache_registry,

View File

@@ -53,9 +53,9 @@ use crate::error::{
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
};
use crate::information_schema::{
InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
};
#[cfg(feature = "enterprise")]
use crate::information_schema::InformationSchemaTableFactoryRef;
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
use crate::kvbackend::TableCacheRef;
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::SystemSchemaProvider;
@@ -557,6 +557,7 @@ pub(super) struct SystemCatalog {
pub(super) numbers_table_provider: NumbersTableProvider,
pub(super) backend: KvBackendRef,
pub(super) process_manager: Option<ProcessManagerRef>,
#[cfg(feature = "enterprise")]
pub(super) extra_information_table_factories:
std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
}
@@ -627,6 +628,7 @@ impl SystemCatalog {
self.process_manager.clone(),
self.backend.clone(),
);
#[cfg(feature = "enterprise")]
let provider = provider
.with_extra_table_factories(self.extra_information_table_factories.clone());
Arc::new(provider)

View File

@@ -117,6 +117,7 @@ macro_rules! setup_memory_table {
};
}
#[cfg(feature = "enterprise")]
pub struct MakeInformationTableRequest {
pub catalog_name: String,
pub catalog_manager: Weak<dyn CatalogManager>,
@@ -127,10 +128,12 @@ pub struct MakeInformationTableRequest {
///
/// This trait allows for extensibility of the information schema by providing
/// a way to dynamically create custom information schema tables.
#[cfg(feature = "enterprise")]
pub trait InformationSchemaTableFactory {
fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef;
}
#[cfg(feature = "enterprise")]
pub type InformationSchemaTableFactoryRef = Arc<dyn InformationSchemaTableFactory + Send + Sync>;
/// The `information_schema` tables info provider.
@@ -140,7 +143,9 @@ pub struct InformationSchemaProvider {
process_manager: Option<ProcessManagerRef>,
flow_metadata_manager: Arc<FlowMetadataManager>,
tables: HashMap<String, TableRef>,
#[allow(dead_code)]
kv_backend: KvBackendRef,
#[cfg(feature = "enterprise")]
extra_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
}
@@ -161,6 +166,7 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
}
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
#[cfg(feature = "enterprise")]
if let Some(factory) = self.extra_table_factories.get(name) {
let req = MakeInformationTableRequest {
catalog_name: self.catalog_name.clone(),
@@ -275,6 +281,7 @@ impl InformationSchemaProvider {
process_manager,
tables: HashMap::new(),
kv_backend,
#[cfg(feature = "enterprise")]
extra_table_factories: HashMap::new(),
};
@@ -283,6 +290,7 @@ impl InformationSchemaProvider {
provider
}
#[cfg(feature = "enterprise")]
pub(crate) fn with_extra_table_factories(
mut self,
factories: HashMap<String, InformationSchemaTableFactoryRef>,
@@ -350,6 +358,7 @@ impl InformationSchemaProvider {
if let Some(process_list) = self.build_table(PROCESS_LIST) {
tables.insert(PROCESS_LIST.to_string(), process_list);
}
#[cfg(feature = "enterprise")]
for name in self.extra_table_factories.keys() {
tables.insert(name.clone(), self.build_table(name).expect(name));
}

View File

@@ -211,7 +211,6 @@ struct InformationSchemaPartitionsBuilder {
partition_names: StringVectorBuilder,
partition_ordinal_positions: Int64VectorBuilder,
partition_expressions: StringVectorBuilder,
partition_descriptions: StringVectorBuilder,
create_times: TimestampSecondVectorBuilder,
partition_ids: UInt64VectorBuilder,
}
@@ -232,7 +231,6 @@ impl InformationSchemaPartitionsBuilder {
partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
partition_descriptions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
create_times: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
}
@@ -321,21 +319,6 @@ impl InformationSchemaPartitionsBuilder {
return;
}
// Get partition column names (shared by all partitions)
// In MySQL, PARTITION_EXPRESSION is the partitioning function expression (e.g., column name)
let partition_columns: String = table_info
.meta
.partition_column_names()
.cloned()
.collect::<Vec<_>>()
.join(", ");
let partition_expr_str = if partition_columns.is_empty() {
None
} else {
Some(partition_columns)
};
for (index, partition) in partitions.iter().enumerate() {
let partition_name = format!("p{index}");
@@ -345,12 +328,8 @@ impl InformationSchemaPartitionsBuilder {
self.partition_names.push(Some(&partition_name));
self.partition_ordinal_positions
.push(Some((index + 1) as i64));
// PARTITION_EXPRESSION: partition column names (same for all partitions)
self.partition_expressions
.push(partition_expr_str.as_deref());
// PARTITION_DESCRIPTION: partition boundary expression (different for each partition)
let description = partition.partition_expr.as_ref().map(|e| e.to_string());
self.partition_descriptions.push(description.as_deref());
let expression = partition.partition_expr.as_ref().map(|e| e.to_string());
self.partition_expressions.push(expression.as_deref());
self.create_times.push(Some(TimestampSecond::from(
table_info.meta.created_on.timestamp(),
)));
@@ -390,7 +369,7 @@ impl InformationSchemaPartitionsBuilder {
null_string_vector.clone(),
Arc::new(self.partition_expressions.finish()),
null_string_vector.clone(),
Arc::new(self.partition_descriptions.finish()),
null_string_vector.clone(),
// TODO(dennis): rows and index statistics info
null_i64_vector.clone(),
null_i64_vector.clone(),

View File

@@ -89,10 +89,6 @@ wrap_with_clap_prefix! {
region: Option<String>,
#[doc = "Enable virtual host style for the object store."]
enable_virtual_host_style: bool = Default::default(),
#[doc = "Allow anonymous access (disable credential signing) for testing."]
allow_anonymous: bool = Default::default(),
#[doc = "Disable config load from environment and files for testing."]
disable_config_load: bool = Default::default(),
}
}

View File

@@ -16,7 +16,7 @@ default = [
"meta-srv/pg_kvbackend",
"meta-srv/mysql_kvbackend",
]
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise"]
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise", "catalog/enterprise"]
tokio-console = ["common-telemetry/tokio-console"]
[lints]

View File

@@ -163,7 +163,7 @@ impl ObjbenchCommand {
available_indexes: Default::default(),
indexes: Default::default(),
index_file_size: 0,
index_version: 0,
index_file_id: None,
num_rows,
num_row_groups,
sequence: None,
@@ -564,7 +564,7 @@ fn new_noop_file_purger() -> FilePurgerRef {
#[derive(Debug)]
struct Noop;
impl FilePurger for Noop {
fn remove_file(&self, _file_meta: FileMeta, _is_delete: bool, _index_outdated: bool) {}
fn remove_file(&self, _file_meta: FileMeta, _is_delete: bool) {}
}
Arc::new(Noop)
}

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
@@ -40,14 +39,12 @@ use flow::{
get_flow_auth_options,
};
use meta_client::{MetaClientOptions, MetaClientType};
use plugins::flownode::context::GrpcConfigureContext;
use servers::configurator::GrpcBuilderConfiguratorRef;
use snafu::{OptionExt, ResultExt, ensure};
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
BuildCacheRegistrySnafu, InitMetadataSnafu, LoadLayeredConfigSnafu, MetaClientInitSnafu,
MissingConfigSnafu, OtherSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
@@ -58,14 +55,33 @@ type FlownodeOptions = GreptimeOptions<flow::FlownodeOptions>;
pub struct Instance {
flownode: FlownodeInstance,
// The components of flownode, which make it easier to expand based
// on the components.
#[cfg(feature = "enterprise")]
components: Components,
// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
#[cfg(feature = "enterprise")]
pub struct Components {
pub catalog_manager: catalog::CatalogManagerRef,
pub fe_client: Arc<FrontendClient>,
pub kv_backend: common_meta::kv_backend::KvBackendRef,
}
impl Instance {
pub fn new(flownode: FlownodeInstance, guard: Vec<WorkerGuard>) -> Self {
pub fn new(
flownode: FlownodeInstance,
#[cfg(feature = "enterprise")] components: Components,
guard: Vec<WorkerGuard>,
) -> Self {
Self {
flownode,
#[cfg(feature = "enterprise")]
components,
_guard: guard,
}
}
@@ -78,6 +94,11 @@ impl Instance {
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
&mut self.flownode
}
#[cfg(feature = "enterprise")]
pub fn components(&self) -> &Components {
&self.components
}
}
#[async_trait::async_trait]
@@ -375,7 +396,7 @@ impl StartCommand {
let frontend_client = Arc::new(frontend_client);
let flownode_builder = FlownodeBuilder::new(
opts.clone(),
plugins.clone(),
plugins,
table_metadata_manager,
catalog_manager.clone(),
flow_metadata_manager,
@@ -384,29 +405,8 @@ impl StartCommand {
.with_heartbeat_task(heartbeat_task);
let mut flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;
let builder =
FlownodeServiceBuilder::grpc_server_builder(&opts, flownode.flownode_server());
let builder = if let Some(configurator) =
plugins.get::<GrpcBuilderConfiguratorRef<GrpcConfigureContext>>()
{
let context = GrpcConfigureContext {
kv_backend: cached_meta_backend.clone(),
fe_client: frontend_client.clone(),
flownode_id: member_id,
catalog_manager: catalog_manager.clone(),
};
configurator
.configure(builder, context)
.await
.context(OtherSnafu)?
} else {
builder
};
let grpc_server = builder.build();
let services = FlownodeServiceBuilder::new(&opts)
.with_grpc_server(grpc_server)
.with_default_grpc_server(flownode.flownode_server())
.enable_http_service()
.build()
.context(StartFlownodeSnafu)?;
@@ -430,6 +430,16 @@ impl StartCommand {
.set_frontend_invoker(invoker)
.await;
Ok(Instance::new(flownode, guard))
#[cfg(feature = "enterprise")]
let components = Components {
catalog_manager: catalog_manager.clone(),
fe_client: frontend_client,
kv_backend: cached_meta_backend,
};
#[cfg(not(feature = "enterprise"))]
return Ok(Instance::new(flownode, guard));
#[cfg(feature = "enterprise")]
Ok(Instance::new(flownode, components, guard))
}
}

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
@@ -20,10 +19,7 @@ use std::time::Duration;
use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_extension::DistributedInformationExtension;
use catalog::kvbackend::{
CachedKvBackendBuilder, CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder,
MetaKvBackend,
};
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManagerBuilder, MetaKvBackend};
use catalog::process_manager::ProcessManager;
use clap::Parser;
use client::client_manager::NodeClients;
@@ -35,7 +31,6 @@ use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::suspend::SuspendHandler;
use common_query::prelude::set_default_prefix;
use common_stat::ResourceStatImpl;
use common_telemetry::info;
@@ -46,17 +41,14 @@ use frontend::frontend::Frontend;
use frontend::heartbeat::HeartbeatTask;
use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_client::{MetaClientOptions, MetaClientRef, MetaClientType};
use plugins::frontend::context::{
CatalogManagerConfigureContext, DistributedCatalogManagerConfigureContext,
};
use meta_client::{MetaClientOptions, MetaClientType};
use servers::addrs;
use servers::grpc::GrpcOptions;
use servers::tls::{TlsMode, TlsOption};
use snafu::{OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{self, OtherSnafu, Result};
use crate::error::{self, Result};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
@@ -424,30 +416,38 @@ impl StartCommand {
layered_cache_registry.clone(),
)
.with_process_manager(process_manager.clone());
let builder = if let Some(configurator) =
plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
{
let ctx = DistributedCatalogManagerConfigureContext {
meta_client: meta_client.clone(),
};
let ctx = CatalogManagerConfigureContext::Distributed(ctx);
configurator
.configure(builder, ctx)
.await
.context(OtherSnafu)?
#[cfg(feature = "enterprise")]
let builder = if let Some(factories) = plugins.get() {
builder.with_extra_information_table_factories(factories)
} else {
builder
};
let catalog_manager = builder.build();
let executor = HandlerGroupExecutor::new(vec![
Arc::new(ParseMailboxMessageHandler),
Arc::new(InvalidateCacheHandler::new(layered_cache_registry.clone())),
]);
let mut resource_stat = ResourceStatImpl::default();
resource_stat.start_collect_cpu_usage();
let heartbeat_task = HeartbeatTask::new(
&opts,
meta_client.clone(),
opts.heartbeat.clone(),
Arc::new(executor),
Arc::new(resource_stat),
);
let heartbeat_task = Some(heartbeat_task);
let instance = FrontendBuilder::new(
opts.clone(),
cached_meta_backend.clone(),
layered_cache_registry.clone(),
catalog_manager,
client,
meta_client.clone(),
meta_client,
process_manager,
)
.with_plugin(plugins.clone())
@@ -455,9 +455,6 @@ impl StartCommand {
.try_build()
.await
.context(error::StartFrontendSnafu)?;
let heartbeat_task = Some(create_heartbeat_task(&opts, meta_client, &instance));
let instance = Arc::new(instance);
let servers = Services::new(opts, instance.clone(), plugins)
@@ -474,28 +471,6 @@ impl StartCommand {
}
}
pub fn create_heartbeat_task(
options: &frontend::frontend::FrontendOptions,
meta_client: MetaClientRef,
instance: &frontend::instance::Instance,
) -> HeartbeatTask {
let executor = Arc::new(HandlerGroupExecutor::new(vec![
Arc::new(ParseMailboxMessageHandler),
Arc::new(SuspendHandler::new(instance.suspend_state())),
Arc::new(InvalidateCacheHandler::new(
instance.cache_invalidator().clone(),
)),
]));
let stat = {
let mut stat = ResourceStatImpl::default();
stat.start_collect_cpu_usage();
Arc::new(stat)
};
HeartbeatTask::new(options, meta_client, executor, stat)
}
#[cfg(test)]
mod tests {
use std::io::Write;

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{self, Debug};
use std::fmt;
use std::path::Path;
use std::time::Duration;
@@ -23,7 +23,7 @@ use common_config::Configurable;
use common_telemetry::info;
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
use common_version::{short_version, verbose_version};
use meta_srv::bootstrap::{MetasrvInstance, metasrv_builder};
use meta_srv::bootstrap::MetasrvInstance;
use meta_srv::metasrv::BackendImpl;
use snafu::ResultExt;
use tracing_appender::non_blocking::WorkerGuard;
@@ -177,7 +177,7 @@ pub struct StartCommand {
backend: Option<BackendImpl>,
}
impl Debug for StartCommand {
impl fmt::Debug for StartCommand {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("StartCommand")
.field("rpc_bind_addr", &self.rpc_bind_addr)
@@ -341,7 +341,7 @@ impl StartCommand {
.await
.context(StartMetaServerSnafu)?;
let builder = metasrv_builder(&opts, plugins, None)
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins, None)
.await
.context(error::BuildMetaServerSnafu)?;
let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::Arc;
@@ -21,7 +20,7 @@ use std::{fs, path};
use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_schema::InformationExtensionRef;
use catalog::kvbackend::{CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder};
use catalog::kvbackend::KvBackendCatalogManagerBuilder;
use catalog::process_manager::ProcessManager;
use clap::Parser;
use common_base::Plugins;
@@ -32,7 +31,7 @@ use common_meta::cache::LayeredCacheRegistryBuilder;
use common_meta::ddl::flow_meta::FlowMetadataAllocator;
use common_meta::ddl::table_meta::TableMetadataAllocator;
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl};
use common_meta::ddl_manager::{DdlManager, DdlManagerConfiguratorRef};
use common_meta::ddl_manager::DdlManager;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
@@ -58,17 +57,13 @@ use frontend::instance::StandaloneDatanodeManager;
use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use plugins::frontend::context::{
CatalogManagerConfigureContext, StandaloneCatalogManagerConfigureContext,
};
use plugins::standalone::context::DdlManagerConfigureContext;
use servers::tls::{TlsMode, TlsOption};
use snafu::ResultExt;
use standalone::StandaloneInformationExtension;
use standalone::options::StandaloneOptions;
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{OtherSnafu, Result, StartFlownodeSnafu};
use crate::error::{Result, StartFlownodeSnafu};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{App, create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile};
@@ -121,15 +116,34 @@ pub struct Instance {
flownode: FlownodeInstance,
procedure_manager: ProcedureManagerRef,
wal_options_allocator: WalOptionsAllocatorRef,
// The components of standalone, which make it easier to expand based
// on the components.
#[cfg(feature = "enterprise")]
components: Components,
// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
#[cfg(feature = "enterprise")]
pub struct Components {
pub plugins: Plugins,
pub kv_backend: KvBackendRef,
pub frontend_client: Arc<FrontendClient>,
pub catalog_manager: catalog::CatalogManagerRef,
}
impl Instance {
/// Find the socket addr of a server by its `name`.
pub fn server_addr(&self, name: &str) -> Option<SocketAddr> {
self.frontend.server_handlers().addr(name)
}
#[cfg(feature = "enterprise")]
pub fn components(&self) -> &Components {
&self.components
}
}
#[async_trait]
@@ -401,13 +415,6 @@ impl StartCommand {
plugins.insert::<InformationExtensionRef>(information_extension.clone());
let process_manager = Arc::new(ProcessManager::new(opts.grpc.server_addr.clone(), None));
// for standalone not use grpc, but get a handler to frontend grpc client without
// actually make a connection
let (frontend_client, frontend_instance_handler) =
FrontendClient::from_empty_grpc_handler(opts.query.clone());
let frontend_client = Arc::new(frontend_client);
let builder = KvBackendCatalogManagerBuilder::new(
information_extension.clone(),
kv_backend.clone(),
@@ -415,17 +422,9 @@ impl StartCommand {
)
.with_procedure_manager(procedure_manager.clone())
.with_process_manager(process_manager.clone());
let builder = if let Some(configurator) =
plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
{
let ctx = StandaloneCatalogManagerConfigureContext {
fe_client: frontend_client.clone(),
};
let ctx = CatalogManagerConfigureContext::Standalone(ctx);
configurator
.configure(builder, ctx)
.await
.context(OtherSnafu)?
#[cfg(feature = "enterprise")]
let builder = if let Some(factories) = plugins.get() {
builder.with_extra_information_table_factories(factories)
} else {
builder
};
@@ -440,6 +439,11 @@ impl StartCommand {
..Default::default()
};
// Standalone mode does not use gRPC; it only obtains a handle to the frontend
// gRPC client without actually making a connection.
let (frontend_client, frontend_instance_handler) =
FrontendClient::from_empty_grpc_handler(opts.query.clone());
let frontend_client = Arc::new(frontend_client);
let flow_builder = FlownodeBuilder::new(
flownode_options,
plugins.clone(),
@@ -510,21 +514,11 @@ impl StartCommand {
let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager.clone(), true)
.context(error::InitDdlManagerSnafu)?;
let ddl_manager = if let Some(configurator) =
plugins.get::<DdlManagerConfiguratorRef<DdlManagerConfigureContext>>()
{
let ctx = DdlManagerConfigureContext {
kv_backend: kv_backend.clone(),
fe_client: frontend_client.clone(),
catalog_manager: catalog_manager.clone(),
};
configurator
.configure(ddl_manager, ctx)
.await
.context(OtherSnafu)?
} else {
ddl_manager
#[cfg(feature = "enterprise")]
let ddl_manager = {
let trigger_ddl_manager: Option<common_meta::ddl_manager::TriggerDdlManagerRef> =
plugins.get();
ddl_manager.with_trigger_ddl_manager(trigger_ddl_manager)
};
let procedure_executor = Arc::new(LocalProcedureExecutor::new(
@@ -580,12 +574,22 @@ impl StartCommand {
heartbeat_task: None,
};
#[cfg(feature = "enterprise")]
let components = Components {
plugins,
kv_backend,
frontend_client,
catalog_manager,
};
Ok(Instance {
datanode,
frontend,
flownode,
procedure_manager,
wal_options_allocator,
#[cfg(feature = "enterprise")]
components,
_guard: guard,
})
}
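A note on the hunk above: with the `enterprise` feature enabled, the standalone start-up now bundles the pieces it built into `Components` and stores them on `Instance`. A minimal sketch of how a downstream caller could reach them through the new accessor (field names are taken from the diff; the surrounding setup is assumed):

#[cfg(feature = "enterprise")]
fn use_components(instance: &Instance) {
    // Accessor added in this change; returns the bundle built during start-up.
    let components = instance.components();
    let _plugins = &components.plugins;
    let _kv_backend = &components.kv_backend;
    let _frontend_client = &components.frontend_client;
    let _catalog_manager = &components.catalog_manager;
}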

View File

@@ -52,6 +52,7 @@ fn test_load_datanode_example_config() {
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
heartbeat_timeout: Duration::from_millis(500),
ddl_timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(1),
tcp_nodelay: true,
@@ -117,6 +118,7 @@ fn test_load_frontend_example_config() {
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
heartbeat_timeout: Duration::from_millis(500),
ddl_timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(1),
tcp_nodelay: true,
@@ -239,6 +241,7 @@ fn test_load_flownode_example_config() {
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
heartbeat_timeout: Duration::from_millis(500),
ddl_timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(1),
tcp_nodelay: true,

View File

@@ -32,12 +32,7 @@ impl Plugins {
pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
let last = self.write().insert(value);
if last.is_some() {
panic!(
"Plugin of type {} already exists",
std::any::type_name::<T>()
);
}
assert!(last.is_none(), "each type of plugins must be one and only");
}
pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
@@ -145,7 +140,7 @@ mod tests {
}
#[test]
#[should_panic(expected = "Plugin of type i32 already exists")]
#[should_panic(expected = "each type of plugins must be one and only")]
fn test_plugin_uniqueness() {
let plugins = Plugins::new();
plugins.insert(1i32);
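As the adjusted test indicates, `Plugins::insert` still enforces a single value per type; only the panic message changed. A minimal sketch of the behaviour (mirroring the test above):

let plugins = Plugins::new();
plugins.insert(1i32);
// A second value of the same type is rejected; this call panics with
// "each type of plugins must be one and only".
plugins.insert(2i32);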

View File

@@ -11,10 +11,8 @@ workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
config.workspace = true
humantime-serde.workspace = true
notify.workspace = true
object-store.workspace = true
serde.workspace = true
serde_json.workspace = true

View File

@@ -49,41 +49,14 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to watch file: {}", path))]
FileWatch {
path: String,
#[snafu(source)]
error: notify::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to canonicalize path: {}", path))]
CanonicalizePath {
path: String,
#[snafu(source)]
error: std::io::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid path '{}': expected a file, not a directory", path))]
InvalidPath {
path: String,
#[snafu(implicit)]
location: Location,
},
}
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::TomlFormat { .. }
| Error::LoadLayeredConfig { .. }
| Error::FileWatch { .. }
| Error::InvalidPath { .. }
| Error::CanonicalizePath { .. } => StatusCode::InvalidArguments,
Error::TomlFormat { .. } | Error::LoadLayeredConfig { .. } => {
StatusCode::InvalidArguments
}
Error::SerdeJson { .. } => StatusCode::Unexpected,
}
}

View File

@@ -1,355 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Common file watching utilities for configuration hot-reloading.
//!
//! This module provides a generic file watcher that can be used to watch
//! files for changes and trigger callbacks when changes occur.
//!
//! The watcher monitors the parent directory of each file rather than the
//! file itself. This ensures that file deletions and recreations are properly
//! tracked, which is common with editors that use atomic saves or when
//! configuration files are replaced.
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::sync::mpsc::channel;
use common_telemetry::{error, info, warn};
use notify::{EventKind, RecursiveMode, Watcher};
use snafu::ResultExt;
use crate::error::{CanonicalizePathSnafu, FileWatchSnafu, InvalidPathSnafu, Result};
/// Configuration for the file watcher behavior.
#[derive(Debug, Clone, Default)]
pub struct FileWatcherConfig {
/// Whether to include Remove events in addition to Modify and Create.
pub include_remove_events: bool,
}
impl FileWatcherConfig {
pub fn new() -> Self {
Self::default()
}
pub fn with_modify_and_create(mut self) -> Self {
self.include_remove_events = false;
self
}
pub fn with_remove_events(mut self) -> Self {
self.include_remove_events = true;
self
}
}
/// A builder for creating file watchers with flexible configuration.
///
/// The watcher monitors the parent directory of each file to handle file
/// deletion and recreation properly. Events are filtered to only trigger
/// callbacks for the specific files being watched.
pub struct FileWatcherBuilder {
config: FileWatcherConfig,
/// Canonicalized paths of files to watch.
file_paths: Vec<PathBuf>,
}
impl FileWatcherBuilder {
/// Create a new builder with default configuration.
pub fn new() -> Self {
Self {
config: FileWatcherConfig::default(),
file_paths: Vec::new(),
}
}
/// Set the watcher configuration.
pub fn config(mut self, config: FileWatcherConfig) -> Self {
self.config = config;
self
}
/// Add a file path to watch.
///
/// Returns an error if the path is a directory.
/// The path is canonicalized for reliable comparison with events.
pub fn watch_path<P: AsRef<Path>>(mut self, path: P) -> Result<Self> {
let path = path.as_ref();
snafu::ensure!(
path.is_file(),
InvalidPathSnafu {
path: path.display().to_string(),
}
);
// Canonicalize the path for reliable comparison with event paths
let canonical = path.canonicalize().context(CanonicalizePathSnafu {
path: path.display().to_string(),
})?;
self.file_paths.push(canonical);
Ok(self)
}
/// Add multiple file paths to watch.
///
/// Returns an error if any path is a directory.
pub fn watch_paths<P: AsRef<Path>, I: IntoIterator<Item = P>>(
mut self,
paths: I,
) -> Result<Self> {
for path in paths {
self = self.watch_path(path)?;
}
Ok(self)
}
/// Build and spawn the file watcher with the given callback.
///
/// The callback is invoked when relevant file events are detected for
/// the watched files. The watcher monitors the parent directories to
/// handle file deletion and recreation properly.
///
/// The spawned watcher thread runs for the lifetime of the process.
pub fn spawn<F>(self, callback: F) -> Result<()>
where
F: Fn() + Send + 'static,
{
let (tx, rx) = channel::<notify::Result<notify::Event>>();
let mut watcher =
notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
// Collect unique parent directories to watch
let mut watched_dirs: HashSet<PathBuf> = HashSet::new();
for file_path in &self.file_paths {
if let Some(parent) = file_path.parent()
&& watched_dirs.insert(parent.to_path_buf())
{
watcher
.watch(parent, RecursiveMode::NonRecursive)
.context(FileWatchSnafu {
path: parent.display().to_string(),
})?;
}
}
let config = self.config;
let watched_files: HashSet<PathBuf> = self.file_paths.iter().cloned().collect();
info!(
"Spawning file watcher for paths: {:?} (watching parent directories)",
self.file_paths
.iter()
.map(|p| p.display().to_string())
.collect::<Vec<_>>()
);
std::thread::spawn(move || {
// Keep watcher alive in the thread
let _watcher = watcher;
while let Ok(res) = rx.recv() {
match res {
Ok(event) => {
if !is_relevant_event(&event.kind, &config) {
continue;
}
// Check if any of the event paths match our watched files
let is_watched_file = event.paths.iter().any(|event_path| {
// Try to canonicalize the event path for comparison
// If the file was deleted, canonicalize will fail, so we also
// compare the raw path
if let Ok(canonical) = event_path.canonicalize()
&& watched_files.contains(&canonical)
{
return true;
}
// For deleted files, compare using the raw path
watched_files.contains(event_path)
});
if !is_watched_file {
continue;
}
info!(?event.kind, ?event.paths, "Detected file change");
callback();
}
Err(err) => {
warn!("File watcher error: {}", err);
}
}
}
error!("File watcher channel closed unexpectedly");
});
Ok(())
}
}
impl Default for FileWatcherBuilder {
fn default() -> Self {
Self::new()
}
}
/// Check if an event kind is relevant based on the configuration.
fn is_relevant_event(kind: &EventKind, config: &FileWatcherConfig) -> bool {
match kind {
EventKind::Modify(_) | EventKind::Create(_) => true,
EventKind::Remove(_) => config.include_remove_events,
_ => false,
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;
use common_test_util::temp_dir::create_temp_dir;
use super::*;
#[test]
fn test_file_watcher_detects_changes() {
common_telemetry::init_default_ut_logging();
let dir = create_temp_dir("test_file_watcher");
let file_path = dir.path().join("test_file.txt");
// Create initial file
std::fs::write(&file_path, "initial content").unwrap();
let counter = Arc::new(AtomicUsize::new(0));
let counter_clone = counter.clone();
FileWatcherBuilder::new()
.watch_path(&file_path)
.unwrap()
.config(FileWatcherConfig::new())
.spawn(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
})
.unwrap();
// Give watcher time to start
std::thread::sleep(Duration::from_millis(100));
// Modify the file
std::fs::write(&file_path, "modified content").unwrap();
// Wait for the event to be processed
std::thread::sleep(Duration::from_millis(500));
assert!(
counter.load(Ordering::SeqCst) >= 1,
"Watcher should have detected at least one change"
);
}
#[test]
fn test_file_watcher_detects_delete_and_recreate() {
common_telemetry::init_default_ut_logging();
let dir = create_temp_dir("test_file_watcher_recreate");
let file_path = dir.path().join("test_file.txt");
// Create initial file
std::fs::write(&file_path, "initial content").unwrap();
let counter = Arc::new(AtomicUsize::new(0));
let counter_clone = counter.clone();
FileWatcherBuilder::new()
.watch_path(&file_path)
.unwrap()
.config(FileWatcherConfig::new())
.spawn(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
})
.unwrap();
// Give watcher time to start
std::thread::sleep(Duration::from_millis(100));
// Delete the file
std::fs::remove_file(&file_path).unwrap();
std::thread::sleep(Duration::from_millis(100));
// Recreate the file - this should still be detected because we watch the directory
std::fs::write(&file_path, "recreated content").unwrap();
// Wait for the event to be processed
std::thread::sleep(Duration::from_millis(500));
assert!(
counter.load(Ordering::SeqCst) >= 1,
"Watcher should have detected file recreation"
);
}
#[test]
fn test_file_watcher_ignores_other_files() {
common_telemetry::init_default_ut_logging();
let dir = create_temp_dir("test_file_watcher_other");
let watched_file = dir.path().join("watched.txt");
let other_file = dir.path().join("other.txt");
// Create both files
std::fs::write(&watched_file, "watched content").unwrap();
std::fs::write(&other_file, "other content").unwrap();
let counter = Arc::new(AtomicUsize::new(0));
let counter_clone = counter.clone();
FileWatcherBuilder::new()
.watch_path(&watched_file)
.unwrap()
.config(FileWatcherConfig::new())
.spawn(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
})
.unwrap();
// Give watcher time to start
std::thread::sleep(Duration::from_millis(100));
// Modify the other file - should NOT trigger callback
std::fs::write(&other_file, "modified other content").unwrap();
// Wait for potential event
std::thread::sleep(Duration::from_millis(500));
assert_eq!(
counter.load(Ordering::SeqCst),
0,
"Watcher should not have detected changes to other files"
);
// Now modify the watched file - SHOULD trigger callback
std::fs::write(&watched_file, "modified watched content").unwrap();
// Wait for the event to be processed
std::thread::sleep(Duration::from_millis(500));
assert!(
counter.load(Ordering::SeqCst) >= 1,
"Watcher should have detected change to watched file"
);
}
}
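For reference, the deleted `FileWatcherBuilder` above was driven as in its own tests: register one or more file paths, then spawn a background thread that invokes the callback on relevant changes. A minimal usage sketch of the removed API (error handling and callback body are illustrative):

use std::path::Path;

use common_config::error::Result;
use common_config::file_watcher::{FileWatcherBuilder, FileWatcherConfig};

fn watch_config_file(path: &Path) -> Result<()> {
    FileWatcherBuilder::new()
        .watch_path(path)?
        .config(FileWatcherConfig::new().with_remove_events())
        .spawn(|| {
            // Reload the configuration here; the watcher monitors the parent
            // directory, so deletion followed by recreation is also reported.
        })
}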

View File

@@ -14,7 +14,6 @@
pub mod config;
pub mod error;
pub mod file_watcher;
use std::time::Duration;

View File

@@ -21,8 +21,6 @@ pub mod status_code;
use http::{HeaderMap, HeaderValue};
pub use snafu;
use crate::status_code::StatusCode;
// HACK - these headers are here for shared in gRPC services. For common HTTP headers,
// please define in `src/servers/src/http/header.rs`.
pub const GREPTIME_DB_HEADER_ERROR_CODE: &str = "x-greptime-err-code";
@@ -48,29 +46,6 @@ pub fn from_err_code_msg_to_header(code: u32, msg: &str) -> HeaderMap {
header
}
/// Extract [StatusCode] and error message from [HeaderMap], if any.
///
/// Note that if the [StatusCode] is illegal, for example, a random number that is not pre-defined
/// as a [StatusCode], the result is still `None`.
pub fn from_header_to_err_code_msg(headers: &HeaderMap) -> Option<(StatusCode, &str)> {
let code = headers
.get(GREPTIME_DB_HEADER_ERROR_CODE)
.and_then(|value| {
value
.to_str()
.ok()
.and_then(|x| x.parse::<u32>().ok())
.and_then(StatusCode::from_u32)
});
let msg = headers
.get(GREPTIME_DB_HEADER_ERROR_MSG)
.and_then(|x| x.to_str().ok());
match (code, msg) {
(Some(code), Some(msg)) => Some((code, msg)),
_ => None,
}
}
/// Returns the external root cause of the source error (exclude the current error).
pub fn root_source(err: &dyn std::error::Error) -> Option<&dyn std::error::Error> {
// There are some divergence about the behavior of the `sources()` API

View File

@@ -42,8 +42,6 @@ pub enum StatusCode {
External = 1007,
/// The request is deadline exceeded (typically server-side).
DeadlineExceeded = 1008,
/// Service got suspended for various reason. For example, resources exceed limit.
Suspended = 1009,
// ====== End of common status code ================
// ====== Begin of SQL related status code =========
@@ -177,8 +175,7 @@ impl StatusCode {
| StatusCode::AccessDenied
| StatusCode::PermissionDenied
| StatusCode::RequestOutdated
| StatusCode::External
| StatusCode::Suspended => false,
| StatusCode::External => false,
}
}
@@ -226,8 +223,7 @@ impl StatusCode {
| StatusCode::InvalidAuthHeader
| StatusCode::AccessDenied
| StatusCode::PermissionDenied
| StatusCode::RequestOutdated
| StatusCode::Suspended => false,
| StatusCode::RequestOutdated => false,
}
}
@@ -351,8 +347,7 @@ pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
| StatusCode::RegionNotReady => Code::Unavailable,
StatusCode::RuntimeResourcesExhausted
| StatusCode::RateLimited
| StatusCode::RegionBusy
| StatusCode::Suspended => Code::ResourceExhausted,
| StatusCode::RegionBusy => Code::ResourceExhausted,
StatusCode::UnsupportedPasswordType
| StatusCode::UserPasswordMismatch
| StatusCode::AuthHeaderNotFound

View File

@@ -39,7 +39,7 @@ datafusion-functions-aggregate-common.workspace = true
datafusion-pg-catalog.workspace = true
datafusion-physical-expr.workspace = true
datatypes.workspace = true
derive_more.workspace = true
derive_more = { version = "1", default-features = false, features = ["display"] }
geo = { version = "0.29", optional = true }
geo-types = { version = "0.7", optional = true }
geohash = { version = "0.13", optional = true }

View File

@@ -14,7 +14,6 @@
mod binary;
mod ctx;
mod if_func;
mod is_null;
mod unary;
@@ -23,7 +22,6 @@ pub use ctx::EvalContext;
pub use unary::scalar_unary_op;
use crate::function_registry::FunctionRegistry;
use crate::scalars::expression::if_func::IfFunction;
use crate::scalars::expression::is_null::IsNullFunction;
pub(crate) struct ExpressionFunction;
@@ -31,6 +29,5 @@ pub(crate) struct ExpressionFunction;
impl ExpressionFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register_scalar(IsNullFunction::default());
registry.register_scalar(IfFunction::default());
}
}

View File

@@ -1,404 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use std::fmt::Display;
use arrow::array::ArrowNativeTypeOp;
use arrow::datatypes::ArrowPrimitiveType;
use datafusion::arrow::array::{Array, ArrayRef, AsArray, BooleanArray, PrimitiveArray};
use datafusion::arrow::compute::kernels::zip::zip;
use datafusion::arrow::datatypes::DataType;
use datafusion_common::DataFusionError;
use datafusion_expr::type_coercion::binary::comparison_coercion;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, Volatility};
use crate::function::Function;
const NAME: &str = "if";
/// MySQL-compatible IF function: IF(condition, true_value, false_value)
///
/// Returns true_value if condition is TRUE (not NULL and not 0),
/// otherwise returns false_value.
///
/// MySQL truthy rules:
/// - NULL -> false
/// - 0 (numeric zero) -> false
/// - Any non-zero numeric -> true
/// - Boolean true/false -> use directly
#[derive(Clone, Debug)]
pub struct IfFunction {
signature: Signature,
}
impl Default for IfFunction {
fn default() -> Self {
Self {
signature: Signature::any(3, Volatility::Immutable),
}
}
}
impl Display for IfFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", NAME.to_ascii_uppercase())
}
}
impl Function for IfFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, input_types: &[DataType]) -> datafusion_common::Result<DataType> {
// Return the common type of true_value and false_value (args[1] and args[2])
if input_types.len() < 3 {
return Err(DataFusionError::Plan(format!(
"{} requires 3 arguments, got {}",
NAME,
input_types.len()
)));
}
let true_type = &input_types[1];
let false_type = &input_types[2];
// Use comparison_coercion to find common type
comparison_coercion(true_type, false_type).ok_or_else(|| {
DataFusionError::Plan(format!(
"Cannot find common type for IF function between {:?} and {:?}",
true_type, false_type
))
})
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
if args.args.len() != 3 {
return Err(DataFusionError::Plan(format!(
"{} requires exactly 3 arguments, got {}",
NAME,
args.args.len()
)));
}
let condition = &args.args[0];
let true_value = &args.args[1];
let false_value = &args.args[2];
// Convert condition to boolean array using MySQL truthy rules
let bool_array = to_boolean_array(condition, args.number_rows)?;
// Convert true and false values to arrays
let true_array = true_value.to_array(args.number_rows)?;
let false_array = false_value.to_array(args.number_rows)?;
// Use zip to select values based on condition
// zip expects &dyn Datum, and ArrayRef (Arc<dyn Array>) implements Datum
let result = zip(&bool_array, &true_array, &false_array)?;
Ok(ColumnarValue::Array(result))
}
}
/// Convert a ColumnarValue to a BooleanArray using MySQL truthy rules:
/// - NULL -> false
/// - 0 (any numeric zero) -> false
/// - Non-zero numeric -> true
/// - Boolean -> use directly
fn to_boolean_array(
value: &ColumnarValue,
num_rows: usize,
) -> datafusion_common::Result<BooleanArray> {
let array = value.to_array(num_rows)?;
array_to_bool(array)
}
/// Convert an integer PrimitiveArray to BooleanArray using MySQL truthy rules:
/// NULL -> false, 0 -> false, non-zero -> true
fn int_array_to_bool<T>(array: &PrimitiveArray<T>) -> BooleanArray
where
T: ArrowPrimitiveType,
T::Native: ArrowNativeTypeOp,
{
BooleanArray::from_iter(
array
.iter()
.map(|opt| Some(opt.is_some_and(|v| !v.is_zero()))),
)
}
/// Convert a float PrimitiveArray to BooleanArray using MySQL truthy rules:
/// NULL -> false, 0 (including -0.0) -> false, NaN -> true, other non-zero -> true
fn float_array_to_bool<T>(array: &PrimitiveArray<T>) -> BooleanArray
where
T: ArrowPrimitiveType,
T::Native: ArrowNativeTypeOp + num_traits::Float,
{
use num_traits::Float;
BooleanArray::from_iter(
array
.iter()
.map(|opt| Some(opt.is_some_and(|v| v.is_nan() || !v.is_zero()))),
)
}
/// Convert an Array to BooleanArray using MySQL truthy rules
fn array_to_bool(array: ArrayRef) -> datafusion_common::Result<BooleanArray> {
use arrow::datatypes::*;
match array.data_type() {
DataType::Boolean => {
let bool_array = array.as_boolean();
Ok(BooleanArray::from_iter(
bool_array.iter().map(|opt| Some(opt.unwrap_or(false))),
))
}
DataType::Int8 => Ok(int_array_to_bool(array.as_primitive::<Int8Type>())),
DataType::Int16 => Ok(int_array_to_bool(array.as_primitive::<Int16Type>())),
DataType::Int32 => Ok(int_array_to_bool(array.as_primitive::<Int32Type>())),
DataType::Int64 => Ok(int_array_to_bool(array.as_primitive::<Int64Type>())),
DataType::UInt8 => Ok(int_array_to_bool(array.as_primitive::<UInt8Type>())),
DataType::UInt16 => Ok(int_array_to_bool(array.as_primitive::<UInt16Type>())),
DataType::UInt32 => Ok(int_array_to_bool(array.as_primitive::<UInt32Type>())),
DataType::UInt64 => Ok(int_array_to_bool(array.as_primitive::<UInt64Type>())),
// Float16 needs special handling since half::f16 doesn't implement num_traits::Float
DataType::Float16 => {
let typed_array = array.as_primitive::<Float16Type>();
Ok(BooleanArray::from_iter(typed_array.iter().map(|opt| {
Some(opt.is_some_and(|v| {
let f = v.to_f32();
f.is_nan() || !f.is_zero()
}))
})))
}
DataType::Float32 => Ok(float_array_to_bool(array.as_primitive::<Float32Type>())),
DataType::Float64 => Ok(float_array_to_bool(array.as_primitive::<Float64Type>())),
// Null type is always false.
// Note: NullArray::is_null() returns false (physical null), so we must handle it explicitly.
// See: https://github.com/apache/arrow-rs/issues/4840
DataType::Null => Ok(BooleanArray::from(vec![false; array.len()])),
// For other types, treat non-null as true
_ => {
let len = array.len();
Ok(BooleanArray::from_iter(
(0..len).map(|i| Some(!array.is_null(i))),
))
}
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use arrow_schema::Field;
use datafusion_common::ScalarValue;
use datafusion_common::arrow::array::{AsArray, Int32Array, StringArray};
use super::*;
#[test]
fn test_if_function_basic() {
let if_func = IfFunction::default();
assert_eq!("if", if_func.name());
// Test IF(true, 'yes', 'no') -> 'yes'
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Boolean(Some(true))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "yes");
} else {
panic!("Expected Array result");
}
}
#[test]
fn test_if_function_false() {
let if_func = IfFunction::default();
// Test IF(false, 'yes', 'no') -> 'no'
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Boolean(Some(false))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
}
#[test]
fn test_if_function_null_is_false() {
let if_func = IfFunction::default();
// Test IF(NULL, 'yes', 'no') -> 'no' (NULL is treated as false)
// Using Boolean(None) - typed null
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Boolean(None)),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
// Test IF(NULL, 'yes', 'no') -> 'no' using ScalarValue::Null (untyped null from SQL NULL literal)
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Null),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
}
#[test]
fn test_if_function_numeric_truthy() {
let if_func = IfFunction::default();
// Test IF(1, 'yes', 'no') -> 'yes' (non-zero is true)
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Int32(Some(1))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "yes");
} else {
panic!("Expected Array result");
}
// Test IF(0, 'yes', 'no') -> 'no' (zero is false)
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Scalar(ScalarValue::Int32(Some(0))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("yes".to_string()))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("no".to_string()))),
],
arg_fields: vec![],
number_rows: 1,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "no");
} else {
panic!("Expected Array result");
}
}
#[test]
fn test_if_function_with_arrays() {
let if_func = IfFunction::default();
// Test with array condition
let condition = Int32Array::from(vec![Some(1), Some(0), None, Some(5)]);
let true_val = StringArray::from(vec!["yes", "yes", "yes", "yes"]);
let false_val = StringArray::from(vec!["no", "no", "no", "no"]);
let result = if_func
.invoke_with_args(ScalarFunctionArgs {
args: vec![
ColumnarValue::Array(Arc::new(condition)),
ColumnarValue::Array(Arc::new(true_val)),
ColumnarValue::Array(Arc::new(false_val)),
],
arg_fields: vec![],
number_rows: 4,
return_field: Arc::new(Field::new("", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
})
.unwrap();
if let ColumnarValue::Array(arr) = result {
let str_arr = arr.as_string::<i32>();
assert_eq!(str_arr.value(0), "yes"); // 1 is true
assert_eq!(str_arr.value(1), "no"); // 0 is false
assert_eq!(str_arr.value(2), "no"); // NULL is false
assert_eq!(str_arr.value(3), "yes"); // 5 is true
} else {
panic!("Expected Array result");
}
}
}
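To make the truthiness rules of the deleted `IF` function concrete: NULL and numeric zero select the false branch, while any non-zero value (including NaN) selects the true branch. An illustrative restatement of that rule (a sketch, not the arrow-based implementation above):

/// MySQL-style truthiness as described in the removed UDF's docs.
fn mysql_truthy(value: Option<f64>) -> bool {
    match value {
        None => false,                 // NULL selects the false branch
        Some(v) if v.is_nan() => true, // NaN counts as non-zero, hence true
        Some(v) => v != 0.0,           // 0 and -0.0 are false, anything else true
    }
}

#[test]
fn truthy_examples() {
    assert!(!mysql_truthy(None));      // IF(NULL, 'yes', 'no') -> 'no'
    assert!(!mysql_truthy(Some(0.0))); // IF(0, 'yes', 'no')    -> 'no'
    assert!(mysql_truthy(Some(5.0)));  // IF(5, 'yes', 'no')    -> 'yes'
    assert!(mysql_truthy(Some(f64::NAN)));
}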

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Display;
use std::sync::Arc;
use datafusion_common::arrow::array::{Array, AsArray, BooleanBuilder};

View File

@@ -17,7 +17,7 @@ use std::sync::Arc;
use common_catalog::consts::{
DEFAULT_PRIVATE_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
};
use datafusion::arrow::array::{ArrayRef, StringArray, StringBuilder, as_boolean_array};
use datafusion::arrow::array::{ArrayRef, StringArray, as_boolean_array};
use datafusion::catalog::TableFunction;
use datafusion::common::ScalarValue;
use datafusion::common::utils::SingleRowListArrayBuilder;
@@ -34,15 +34,10 @@ const CURRENT_SCHEMA_FUNCTION_NAME: &str = "current_schema";
const CURRENT_SCHEMAS_FUNCTION_NAME: &str = "current_schemas";
const SESSION_USER_FUNCTION_NAME: &str = "session_user";
const CURRENT_DATABASE_FUNCTION_NAME: &str = "current_database";
const OBJ_DESCRIPTION_FUNCTION_NAME: &str = "obj_description";
const COL_DESCRIPTION_FUNCTION_NAME: &str = "col_description";
const SHOBJ_DESCRIPTION_FUNCTION_NAME: &str = "shobj_description";
const PG_MY_TEMP_SCHEMA_FUNCTION_NAME: &str = "pg_my_temp_schema";
define_nullary_udf!(CurrentSchemaFunction);
define_nullary_udf!(SessionUserFunction);
define_nullary_udf!(CurrentDatabaseFunction);
define_nullary_udf!(PgMyTempSchemaFunction);
impl Function for CurrentDatabaseFunction {
fn name(&self) -> &str {
@@ -178,175 +173,6 @@ impl Function for CurrentSchemasFunction {
}
}
/// PostgreSQL obj_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ObjDescriptionFunction {
signature: Signature,
}
impl ObjDescriptionFunction {
pub fn new() -> Self {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Exact(vec![DataType::Int64, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::UInt32, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::Int64]),
TypeSignature::Exact(vec![DataType::UInt32]),
],
Volatility::Stable,
),
}
}
}
impl Function for ObjDescriptionFunction {
fn name(&self) -> &str {
OBJ_DESCRIPTION_FUNCTION_NAME
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::Utf8)
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let num_rows = args.number_rows;
let mut builder = StringBuilder::with_capacity(num_rows, 0);
for _ in 0..num_rows {
builder.append_null();
}
Ok(ColumnarValue::Array(Arc::new(builder.finish())))
}
}
/// PostgreSQL col_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ColDescriptionFunction {
signature: Signature,
}
impl ColDescriptionFunction {
pub fn new() -> Self {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Exact(vec![DataType::Int64, DataType::Int32]),
TypeSignature::Exact(vec![DataType::UInt32, DataType::Int32]),
TypeSignature::Exact(vec![DataType::Int64, DataType::Int64]),
TypeSignature::Exact(vec![DataType::UInt32, DataType::Int64]),
],
Volatility::Stable,
),
}
}
}
impl Function for ColDescriptionFunction {
fn name(&self) -> &str {
COL_DESCRIPTION_FUNCTION_NAME
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::Utf8)
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let num_rows = args.number_rows;
let mut builder = StringBuilder::with_capacity(num_rows, 0);
for _ in 0..num_rows {
builder.append_null();
}
Ok(ColumnarValue::Array(Arc::new(builder.finish())))
}
}
/// PostgreSQL shobj_description - returns NULL for compatibility
#[derive(Display, Debug, Clone)]
#[display("{}", self.name())]
pub(super) struct ShobjDescriptionFunction {
signature: Signature,
}
impl ShobjDescriptionFunction {
pub fn new() -> Self {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Exact(vec![DataType::Int64, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::UInt64, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::Int32, DataType::Utf8]),
TypeSignature::Exact(vec![DataType::UInt32, DataType::Utf8]),
],
Volatility::Stable,
),
}
}
}
impl Function for ShobjDescriptionFunction {
fn name(&self) -> &str {
SHOBJ_DESCRIPTION_FUNCTION_NAME
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::Utf8)
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
let num_rows = args.number_rows;
let mut builder = StringBuilder::with_capacity(num_rows, 0);
for _ in 0..num_rows {
builder.append_null();
}
Ok(ColumnarValue::Array(Arc::new(builder.finish())))
}
}
/// PostgreSQL pg_my_temp_schema - returns 0 (no temp schema) for compatibility
impl Function for PgMyTempSchemaFunction {
fn name(&self) -> &str {
PG_MY_TEMP_SCHEMA_FUNCTION_NAME
}
fn return_type(&self, _: &[DataType]) -> datafusion_common::Result<DataType> {
Ok(DataType::UInt32)
}
fn signature(&self) -> &Signature {
&self.signature
}
fn invoke_with_args(
&self,
_args: ScalarFunctionArgs,
) -> datafusion_common::Result<ColumnarValue> {
Ok(ColumnarValue::Scalar(ScalarValue::UInt32(Some(0))))
}
}
pub(super) struct PGCatalogFunction;
impl PGCatalogFunction {
@@ -386,100 +212,5 @@ impl PGCatalogFunction {
registry.register(pg_catalog::create_pg_total_relation_size_udf());
registry.register(pg_catalog::create_pg_stat_get_numscans());
registry.register(pg_catalog::create_pg_get_constraintdef());
registry.register(pg_catalog::create_pg_get_partition_ancestors_udf());
registry.register(pg_catalog::quote_ident_udf::create_quote_ident_udf());
registry.register(pg_catalog::quote_ident_udf::create_parse_ident_udf());
registry.register_scalar(ObjDescriptionFunction::new());
registry.register_scalar(ColDescriptionFunction::new());
registry.register_scalar(ShobjDescriptionFunction::new());
registry.register_scalar(PgMyTempSchemaFunction::default());
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use arrow_schema::Field;
use datafusion::arrow::array::Array;
use datafusion_common::ScalarValue;
use datafusion_expr::ColumnarValue;
use super::*;
fn create_test_args(args: Vec<ColumnarValue>, number_rows: usize) -> ScalarFunctionArgs {
ScalarFunctionArgs {
args,
arg_fields: vec![],
number_rows,
return_field: Arc::new(Field::new("result", DataType::Utf8, true)),
config_options: Arc::new(Default::default()),
}
}
#[test]
fn test_obj_description_function() {
let func = ObjDescriptionFunction::new();
assert_eq!("obj_description", func.name());
assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());
let args = create_test_args(
vec![
ColumnarValue::Scalar(ScalarValue::Int64(Some(1234))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("pg_class".to_string()))),
],
1,
);
let result = func.invoke_with_args(args).unwrap();
if let ColumnarValue::Array(arr) = result {
assert_eq!(1, arr.len());
assert!(arr.is_null(0));
} else {
panic!("Expected Array result");
}
}
#[test]
fn test_col_description_function() {
let func = ColDescriptionFunction::new();
assert_eq!("col_description", func.name());
assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());
let args = create_test_args(
vec![
ColumnarValue::Scalar(ScalarValue::Int64(Some(1234))),
ColumnarValue::Scalar(ScalarValue::Int64(Some(1))),
],
1,
);
let result = func.invoke_with_args(args).unwrap();
if let ColumnarValue::Array(arr) = result {
assert_eq!(1, arr.len());
assert!(arr.is_null(0));
} else {
panic!("Expected Array result");
}
}
#[test]
fn test_shobj_description_function() {
let func = ShobjDescriptionFunction::new();
assert_eq!("shobj_description", func.name());
assert_eq!(DataType::Utf8, func.return_type(&[]).unwrap());
let args = create_test_args(
vec![
ColumnarValue::Scalar(ScalarValue::Int64(Some(1))),
ColumnarValue::Scalar(ScalarValue::Utf8(Some("pg_database".to_string()))),
],
1,
);
let result = func.invoke_with_args(args).unwrap();
if let ColumnarValue::Array(arr) = result {
assert_eq!(1, arr.len());
assert!(arr.is_null(0));
} else {
panic!("Expected Array result");
}
}
}
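The removed PostgreSQL compatibility shims above were pure stubs: as their tests confirm, they ignored their arguments and returned NULL (or 0 for `pg_my_temp_schema`). A minimal restatement of the pattern they shared (a sketch; the real implementations built exactly such an all-null column of `number_rows` entries):

use std::sync::Arc;

use datafusion::arrow::array::StringBuilder;
use datafusion_expr::ColumnarValue;

/// Build a column of `num_rows` NULL strings, as the removed shims did.
fn all_null_column(num_rows: usize) -> ColumnarValue {
    let mut builder = StringBuilder::with_capacity(num_rows, 0);
    for _ in 0..num_rows {
        builder.append_null();
    }
    ColumnarValue::Array(Arc::new(builder.finish()))
}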

View File

@@ -12,7 +12,6 @@ api.workspace = true
arrow-flight.workspace = true
bytes.workspace = true
common-base.workspace = true
common-config.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-recordbatch.workspace = true
@@ -24,6 +23,7 @@ datatypes.workspace = true
flatbuffers = "25.2"
hyper.workspace = true
lazy_static.workspace = true
notify.workspace = true
prost.workspace = true
serde.workspace = true
serde_json.workspace = true

View File

@@ -38,10 +38,11 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to watch config file"))]
#[snafu(display("Failed to watch config file path: {}", path))]
FileWatch {
path: String,
#[snafu(source)]
source: common_config::error::Error,
error: notify::Error,
#[snafu(implicit)]
location: Location,
},

View File

@@ -46,16 +46,13 @@ pub struct DoPutResponse {
request_id: i64,
/// The successfully ingested rows number.
affected_rows: AffectedRows,
/// The elapsed time in seconds for handling the bulk insert.
elapsed_secs: f64,
}
impl DoPutResponse {
pub fn new(request_id: i64, affected_rows: AffectedRows, elapsed_secs: f64) -> Self {
pub fn new(request_id: i64, affected_rows: AffectedRows) -> Self {
Self {
request_id,
affected_rows,
elapsed_secs,
}
}
@@ -66,10 +63,6 @@ impl DoPutResponse {
pub fn affected_rows(&self) -> AffectedRows {
self.affected_rows
}
pub fn elapsed_secs(&self) -> f64 {
self.elapsed_secs
}
}
impl TryFrom<PutResult> for DoPutResponse {
@@ -93,11 +86,8 @@ mod tests {
#[test]
fn test_serde_do_put_response() {
let x = DoPutResponse::new(42, 88, 0.123);
let x = DoPutResponse::new(42, 88);
let serialized = serde_json::to_string(&x).unwrap();
assert_eq!(
serialized,
r#"{"request_id":42,"affected_rows":88,"elapsed_secs":0.123}"#
);
assert_eq!(serialized, r#"{"request_id":42,"affected_rows":88}"#);
}
}

View File

@@ -15,10 +15,11 @@
use std::path::Path;
use std::result::Result as StdResult;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use common_config::file_watcher::{FileWatcherBuilder, FileWatcherConfig};
use common_telemetry::{error, info};
use notify::{EventKind, RecursiveMode, Watcher};
use snafu::ResultExt;
use crate::error::{FileWatchSnafu, Result};
@@ -118,28 +119,45 @@ where
return Ok(());
}
let watch_paths: Vec<_> = tls_config
.get_tls_option()
.watch_paths()
.iter()
.map(|p| p.to_path_buf())
.collect();
let tls_config_for_watcher = tls_config.clone();
FileWatcherBuilder::new()
.watch_paths(&watch_paths)
.context(FileWatchSnafu)?
.config(FileWatcherConfig::new())
.spawn(move || {
if let Err(err) = tls_config_for_watcher.reload() {
error!("Failed to reload TLS config: {}", err);
} else {
info!("Reloaded TLS cert/key file successfully.");
on_reload();
let (tx, rx) = channel::<notify::Result<notify::Event>>();
let mut watcher = notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
// Watch all paths returned by the TlsConfigLoader
for path in tls_config.get_tls_option().watch_paths() {
watcher
.watch(path, RecursiveMode::NonRecursive)
.with_context(|_| FileWatchSnafu {
path: path.display().to_string(),
})?;
}
info!("Spawning background task for watching TLS cert/key file changes");
std::thread::spawn(move || {
let _watcher = watcher;
loop {
match rx.recv() {
Ok(Ok(event)) => {
if let EventKind::Modify(_) | EventKind::Create(_) = event.kind {
info!("Detected TLS cert/key file change: {:?}", event);
if let Err(err) = tls_config_for_watcher.reload() {
error!("Failed to reload TLS config: {}", err);
} else {
info!("Reloaded TLS cert/key file successfully.");
on_reload();
}
}
}
Ok(Err(err)) => {
error!("Failed to watch TLS cert/key file: {}", err);
}
Err(err) => {
error!("TLS cert/key file watcher channel closed: {}", err);
}
}
})
.context(FileWatchSnafu)?;
}
});
Ok(())
}

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{Display, Formatter};
use std::hash::{DefaultHasher, Hash, Hasher};
use std::str::FromStr;
@@ -61,7 +60,7 @@ pub trait ClusterInfo {
}
/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-0-{role}-{node_id}`.
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize, PartialOrd, Ord)]
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct NodeInfoKey {
/// The role of the node. It can be `[Role::Datanode]` or `[Role::Frontend]`.
pub role: Role,
@@ -136,7 +135,7 @@ pub struct NodeInfo {
pub hostname: String,
}
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize, PartialOrd, Ord)]
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub enum Role {
Datanode,
Frontend,
@@ -242,12 +241,6 @@ impl From<&NodeInfoKey> for Vec<u8> {
}
}
impl Display for NodeInfoKey {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}-{}", self.role, self.node_id)
}
}
impl FromStr for NodeInfo {
type Err = Error;

View File

@@ -31,7 +31,6 @@ use crate::region_registry::LeaderRegionRegistryRef;
pub mod alter_database;
pub mod alter_logical_tables;
pub mod alter_table;
pub mod comment_on;
pub mod create_database;
pub mod create_flow;
pub mod create_logical_tables;

View File

@@ -301,8 +301,8 @@ fn build_new_table_info(
| AlterKind::UnsetTableOptions { .. }
| AlterKind::SetIndexes { .. }
| AlterKind::UnsetIndexes { .. }
| AlterKind::DropDefaults { .. }
| AlterKind::SetDefaults { .. } => {}
| AlterKind::DropDefaults { .. } => {}
AlterKind::SetDefaults { .. } => {}
}
info!(

View File

@@ -1,509 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use async_trait::async_trait;
use chrono::Utc;
use common_catalog::format_full_table_name;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use common_telemetry::tracing::info;
use datatypes::schema::COMMENT_KEY as COLUMN_COMMENT_KEY;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt, ensure};
use store_api::storage::TableId;
use strum::AsRefStr;
use table::metadata::RawTableInfo;
use table::requests::COMMENT_KEY as TABLE_COMMENT_KEY;
use table::table_name::TableName;
use crate::cache_invalidator::Context;
use crate::ddl::DdlContext;
use crate::ddl::utils::map_to_procedure_error;
use crate::error::{ColumnNotFoundSnafu, FlowNotFoundSnafu, Result, TableNotFoundSnafu};
use crate::instruction::CacheIdent;
use crate::key::flow::flow_info::{FlowInfoKey, FlowInfoValue};
use crate::key::table_info::{TableInfoKey, TableInfoValue};
use crate::key::table_name::TableNameKey;
use crate::key::{DeserializedValueWithBytes, FlowId, MetadataKey, MetadataValue};
use crate::lock_key::{CatalogLock, FlowNameLock, SchemaLock, TableNameLock};
use crate::rpc::ddl::{CommentObjectType, CommentOnTask};
use crate::rpc::store::PutRequest;
pub struct CommentOnProcedure {
pub context: DdlContext,
pub data: CommentOnData,
}
impl CommentOnProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::CommentOn";
pub fn new(task: CommentOnTask, context: DdlContext) -> Self {
Self {
context,
data: CommentOnData::new(task),
}
}
pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
let data = serde_json::from_str(json).context(FromJsonSnafu)?;
Ok(Self { context, data })
}
pub async fn on_prepare(&mut self) -> Result<Status> {
match self.data.object_type {
CommentObjectType::Table | CommentObjectType::Column => {
self.prepare_table_or_column().await?;
}
CommentObjectType::Flow => {
self.prepare_flow().await?;
}
}
// Fast path: if comment is unchanged, skip update
if self.data.is_unchanged {
let object_desc = match self.data.object_type {
CommentObjectType::Table => format!(
"table {}",
format_full_table_name(
&self.data.catalog_name,
&self.data.schema_name,
&self.data.object_name,
)
),
CommentObjectType::Column => format!(
"column {}.{}",
format_full_table_name(
&self.data.catalog_name,
&self.data.schema_name,
&self.data.object_name,
),
self.data.column_name.as_ref().unwrap()
),
CommentObjectType::Flow => {
format!("flow {}.{}", self.data.catalog_name, self.data.object_name)
}
};
info!("Comment unchanged for {}, skipping update", object_desc);
return Ok(Status::done());
}
self.data.state = CommentOnState::UpdateMetadata;
Ok(Status::executing(true))
}
async fn prepare_table_or_column(&mut self) -> Result<()> {
let table_name_key = TableNameKey::new(
&self.data.catalog_name,
&self.data.schema_name,
&self.data.object_name,
);
let table_id = self
.context
.table_metadata_manager
.table_name_manager()
.get(table_name_key)
.await?
.with_context(|| TableNotFoundSnafu {
table_name: format_full_table_name(
&self.data.catalog_name,
&self.data.schema_name,
&self.data.object_name,
),
})?
.table_id();
let table_info = self
.context
.table_metadata_manager
.table_info_manager()
.get(table_id)
.await?
.with_context(|| TableNotFoundSnafu {
table_name: format_full_table_name(
&self.data.catalog_name,
&self.data.schema_name,
&self.data.object_name,
),
})?;
// For column comments, validate the column exists
if self.data.object_type == CommentObjectType::Column {
let column_name = self.data.column_name.as_ref().unwrap();
let column_exists = table_info
.table_info
.meta
.schema
.column_schemas
.iter()
.any(|col| &col.name == column_name);
ensure!(
column_exists,
ColumnNotFoundSnafu {
column_name,
column_id: 0u32, // column_id is not known here
}
);
}
self.data.table_id = Some(table_id);
// Check if comment is unchanged for early exit optimization
match self.data.object_type {
CommentObjectType::Table => {
let current_comment = &table_info.table_info.desc;
if &self.data.comment == current_comment {
self.data.is_unchanged = true;
}
}
CommentObjectType::Column => {
let column_name = self.data.column_name.as_ref().unwrap();
let column_schema = table_info
.table_info
.meta
.schema
.column_schemas
.iter()
.find(|col| &col.name == column_name)
.unwrap(); // Safe: validated above
let current_comment = column_schema.metadata().get(COLUMN_COMMENT_KEY);
if self.data.comment.as_deref() == current_comment.map(String::as_str) {
self.data.is_unchanged = true;
}
}
CommentObjectType::Flow => {
// this branch is handled in `prepare_flow`
}
}
self.data.table_info = Some(table_info);
Ok(())
}
async fn prepare_flow(&mut self) -> Result<()> {
let flow_name_value = self
.context
.flow_metadata_manager
.flow_name_manager()
.get(&self.data.catalog_name, &self.data.object_name)
.await?
.with_context(|| FlowNotFoundSnafu {
flow_name: &self.data.object_name,
})?;
let flow_id = flow_name_value.flow_id();
let flow_info = self
.context
.flow_metadata_manager
.flow_info_manager()
.get_raw(flow_id)
.await?
.with_context(|| FlowNotFoundSnafu {
flow_name: &self.data.object_name,
})?;
self.data.flow_id = Some(flow_id);
// Check if comment is unchanged for early exit optimization
let current_comment = &flow_info.get_inner_ref().comment;
let new_comment = self.data.comment.as_deref().unwrap_or("");
if new_comment == current_comment.as_str() {
self.data.is_unchanged = true;
}
self.data.flow_info = Some(flow_info);
Ok(())
}
pub async fn on_update_metadata(&mut self) -> Result<Status> {
match self.data.object_type {
CommentObjectType::Table => {
self.update_table_comment().await?;
}
CommentObjectType::Column => {
self.update_column_comment().await?;
}
CommentObjectType::Flow => {
self.update_flow_comment().await?;
}
}
self.data.state = CommentOnState::InvalidateCache;
Ok(Status::executing(true))
}
async fn update_table_comment(&mut self) -> Result<()> {
let table_info_value = self.data.table_info.as_ref().unwrap();
let mut new_table_info = table_info_value.table_info.clone();
new_table_info.desc = self.data.comment.clone();
// Sync comment to table options
sync_table_comment_option(
&mut new_table_info.meta.options,
new_table_info.desc.as_deref(),
);
self.update_table_info(table_info_value, new_table_info)
.await?;
info!(
"Updated comment for table {}.{}.{}",
self.data.catalog_name, self.data.schema_name, self.data.object_name
);
Ok(())
}
async fn update_column_comment(&mut self) -> Result<()> {
let table_info_value = self.data.table_info.as_ref().unwrap();
let mut new_table_info = table_info_value.table_info.clone();
let column_name = self.data.column_name.as_ref().unwrap();
let column_schema = new_table_info
.meta
.schema
.column_schemas
.iter_mut()
.find(|col| &col.name == column_name)
.unwrap(); // Safe: validated in prepare
update_column_comment_metadata(column_schema, self.data.comment.clone());
self.update_table_info(table_info_value, new_table_info)
.await?;
info!(
"Updated comment for column {}.{}.{}.{}",
self.data.catalog_name, self.data.schema_name, self.data.object_name, column_name
);
Ok(())
}
async fn update_flow_comment(&mut self) -> Result<()> {
let flow_id = self.data.flow_id.unwrap();
let flow_info_value = self.data.flow_info.as_ref().unwrap();
let mut new_flow_info = flow_info_value.get_inner_ref().clone();
new_flow_info.comment = self.data.comment.clone().unwrap_or_default();
new_flow_info.updated_time = Utc::now();
let raw_value = new_flow_info.try_as_raw_value()?;
self.context
.table_metadata_manager
.kv_backend()
.put(
PutRequest::new()
.with_key(FlowInfoKey::new(flow_id).to_bytes())
.with_value(raw_value),
)
.await?;
info!(
"Updated comment for flow {}.{}",
self.data.catalog_name, self.data.object_name
);
Ok(())
}
async fn update_table_info(
&self,
current_table_info: &DeserializedValueWithBytes<TableInfoValue>,
new_table_info: RawTableInfo,
) -> Result<()> {
let table_id = current_table_info.table_info.ident.table_id;
let new_table_info_value = current_table_info.update(new_table_info);
let raw_value = new_table_info_value.try_as_raw_value()?;
self.context
.table_metadata_manager
.kv_backend()
.put(
PutRequest::new()
.with_key(TableInfoKey::new(table_id).to_bytes())
.with_value(raw_value),
)
.await?;
Ok(())
}
pub async fn on_invalidate_cache(&mut self) -> Result<Status> {
let cache_invalidator = &self.context.cache_invalidator;
match self.data.object_type {
CommentObjectType::Table | CommentObjectType::Column => {
let table_id = self.data.table_id.unwrap();
let table_name = TableName::new(
self.data.catalog_name.clone(),
self.data.schema_name.clone(),
self.data.object_name.clone(),
);
let cache_ident = vec![
CacheIdent::TableId(table_id),
CacheIdent::TableName(table_name),
];
cache_invalidator
.invalidate(&Context::default(), &cache_ident)
.await?;
}
CommentObjectType::Flow => {
let flow_id = self.data.flow_id.unwrap();
let cache_ident = vec![CacheIdent::FlowId(flow_id)];
cache_invalidator
.invalidate(&Context::default(), &cache_ident)
.await?;
}
}
Ok(Status::done())
}
}
#[async_trait]
impl Procedure for CommentOnProcedure {
fn type_name(&self) -> &str {
Self::TYPE_NAME
}
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
match self.data.state {
CommentOnState::Prepare => self.on_prepare().await,
CommentOnState::UpdateMetadata => self.on_update_metadata().await,
CommentOnState::InvalidateCache => self.on_invalidate_cache().await,
}
.map_err(map_to_procedure_error)
}
fn dump(&self) -> ProcedureResult<String> {
serde_json::to_string(&self.data).context(ToJsonSnafu)
}
fn lock_key(&self) -> LockKey {
let catalog = &self.data.catalog_name;
let schema = &self.data.schema_name;
let lock_key = match self.data.object_type {
CommentObjectType::Table | CommentObjectType::Column => {
vec![
CatalogLock::Read(catalog).into(),
SchemaLock::read(catalog, schema).into(),
TableNameLock::new(catalog, schema, &self.data.object_name).into(),
]
}
CommentObjectType::Flow => {
vec![
CatalogLock::Read(catalog).into(),
FlowNameLock::new(catalog, &self.data.object_name).into(),
]
}
};
LockKey::new(lock_key)
}
}
#[derive(Debug, Serialize, Deserialize, AsRefStr)]
enum CommentOnState {
Prepare,
UpdateMetadata,
InvalidateCache,
}
/// The data of comment on procedure.
#[derive(Debug, Serialize, Deserialize)]
pub struct CommentOnData {
state: CommentOnState,
catalog_name: String,
schema_name: String,
object_type: CommentObjectType,
object_name: String,
/// Column name (only for Column comments)
column_name: Option<String>,
comment: Option<String>,
/// Cached table ID (for Table/Column)
#[serde(skip_serializing_if = "Option::is_none")]
table_id: Option<TableId>,
/// Cached table info (for Table/Column)
#[serde(skip)]
table_info: Option<DeserializedValueWithBytes<TableInfoValue>>,
/// Cached flow ID (for Flow)
#[serde(skip_serializing_if = "Option::is_none")]
flow_id: Option<FlowId>,
/// Cached flow info (for Flow)
#[serde(skip)]
flow_info: Option<DeserializedValueWithBytes<FlowInfoValue>>,
/// Whether the comment is unchanged (optimization for early exit)
#[serde(skip)]
is_unchanged: bool,
}
impl CommentOnData {
pub fn new(task: CommentOnTask) -> Self {
Self {
state: CommentOnState::Prepare,
catalog_name: task.catalog_name,
schema_name: task.schema_name,
object_type: task.object_type,
object_name: task.object_name,
column_name: task.column_name,
comment: task.comment,
table_id: None,
table_info: None,
flow_id: None,
flow_info: None,
is_unchanged: false,
}
}
}
fn update_column_comment_metadata(
column_schema: &mut datatypes::schema::ColumnSchema,
comment: Option<String>,
) {
match comment {
Some(value) => {
column_schema
.mut_metadata()
.insert(COLUMN_COMMENT_KEY.to_string(), value);
}
None => {
column_schema.mut_metadata().remove(COLUMN_COMMENT_KEY);
}
}
}
fn sync_table_comment_option(options: &mut table::requests::TableOptions, comment: Option<&str>) {
match comment {
Some(value) => {
options
.extra_options
.insert(TABLE_COMMENT_KEY.to_string(), value.to_string());
}
None => {
options.extra_options.remove(TABLE_COMMENT_KEY);
}
}
}
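To summarise the deleted procedure above: `CommentOnProcedure` is a three-step state machine driven by `execute`. A compact restatement of its flow (state names and actions taken directly from the deleted code):

// CommentOnState::Prepare
//   resolve the table/column/flow and validate that it exists; if the new
//   comment equals the stored one, finish early with Status::done().
// CommentOnState::UpdateMetadata
//   Table:  rewrite RawTableInfo::desc and sync the table comment option;
//   Column: rewrite the column's COMMENT metadata entry;
//   Flow:   rewrite FlowInfoValue::comment and updated_time.
// CommentOnState::InvalidateCache
//   invalidate CacheIdent::TableId/TableName (or CacheIdent::FlowId), then done.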

View File

@@ -14,7 +14,6 @@
use std::sync::Arc;
use common_error::ext::BoxedError;
use common_procedure::{
BoxedProcedureLoader, Output, ProcedureId, ProcedureManagerRef, ProcedureWithId, watcher,
};
@@ -27,7 +26,6 @@ use store_api::storage::TableId;
use crate::ddl::alter_database::AlterDatabaseProcedure;
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::ddl::alter_table::AlterTableProcedure;
use crate::ddl::comment_on::CommentOnProcedure;
use crate::ddl::create_database::CreateDatabaseProcedure;
use crate::ddl::create_flow::CreateFlowProcedure;
use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
@@ -53,34 +51,21 @@ use crate::rpc::ddl::DdlTask::CreateTrigger;
#[cfg(feature = "enterprise")]
use crate::rpc::ddl::DdlTask::DropTrigger;
use crate::rpc::ddl::DdlTask::{
AlterDatabase, AlterLogicalTables, AlterTable, CommentOn, CreateDatabase, CreateFlow,
CreateLogicalTables, CreateTable, CreateView, DropDatabase, DropFlow, DropLogicalTables,
DropTable, DropView, TruncateTable,
AlterDatabase, AlterLogicalTables, AlterTable, CreateDatabase, CreateFlow, CreateLogicalTables,
CreateTable, CreateView, DropDatabase, DropFlow, DropLogicalTables, DropTable, DropView,
TruncateTable,
};
#[cfg(feature = "enterprise")]
use crate::rpc::ddl::trigger::CreateTriggerTask;
#[cfg(feature = "enterprise")]
use crate::rpc::ddl::trigger::DropTriggerTask;
use crate::rpc::ddl::{
AlterDatabaseTask, AlterTableTask, CommentOnTask, CreateDatabaseTask, CreateFlowTask,
CreateTableTask, CreateViewTask, DropDatabaseTask, DropFlowTask, DropTableTask, DropViewTask,
QueryContext, SubmitDdlTaskRequest, SubmitDdlTaskResponse, TruncateTableTask,
AlterDatabaseTask, AlterTableTask, CreateDatabaseTask, CreateFlowTask, CreateTableTask,
CreateViewTask, DropDatabaseTask, DropFlowTask, DropTableTask, DropViewTask, QueryContext,
SubmitDdlTaskRequest, SubmitDdlTaskResponse, TruncateTableTask,
};
use crate::rpc::router::RegionRoute;
/// A configurator that customizes or enhances a [`DdlManager`].
#[async_trait::async_trait]
pub trait DdlManagerConfigurator<C>: Send + Sync {
/// Configures the given [`DdlManager`] using the provided [`DdlManagerConfigureContext`].
async fn configure(
&self,
ddl_manager: DdlManager,
ctx: C,
) -> std::result::Result<DdlManager, BoxedError>;
}
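
An illustrative implementation sketch of the configurator trait above — not taken from the diff; the unit context type and the no-op body are placeholders just to show the shape a real configurator (for example, one registering extra procedure loaders) would take:

struct NoopConfigurator;

#[async_trait::async_trait]
impl DdlManagerConfigurator<()> for NoopConfigurator {
    async fn configure(
        &self,
        ddl_manager: DdlManager,
        _ctx: (),
    ) -> std::result::Result<DdlManager, BoxedError> {
        // A real configurator would mutate or wrap the manager here.
        Ok(ddl_manager)
    }
}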
pub type DdlManagerConfiguratorRef<C> = Arc<dyn DdlManagerConfigurator<C>>;
pub type DdlManagerRef = Arc<DdlManager>;
pub type BoxedProcedureLoaderFactory = dyn Fn(DdlContext) -> BoxedProcedureLoader;
@@ -163,8 +148,11 @@ impl DdlManager {
}
#[cfg(feature = "enterprise")]
pub fn with_trigger_ddl_manager(mut self, trigger_ddl_manager: TriggerDdlManagerRef) -> Self {
self.trigger_ddl_manager = Some(trigger_ddl_manager);
pub fn with_trigger_ddl_manager(
mut self,
trigger_ddl_manager: Option<TriggerDdlManagerRef>,
) -> Self {
self.trigger_ddl_manager = trigger_ddl_manager;
self
}
@@ -193,8 +181,7 @@ impl DdlManager {
TruncateTableProcedure,
CreateDatabaseProcedure,
DropDatabaseProcedure,
DropViewProcedure,
CommentOnProcedure
DropViewProcedure
);
for (type_name, loader_factory) in loaders {
@@ -410,19 +397,6 @@ impl DdlManager {
self.submit_procedure(procedure_with_id).await
}
/// Submits and executes a comment on task.
#[tracing::instrument(skip_all)]
pub async fn submit_comment_on_task(
&self,
comment_on_task: CommentOnTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = CommentOnProcedure::new(comment_on_task, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
self.submit_procedure(procedure_with_id).await
}
async fn submit_procedure(
&self,
procedure_with_id: ProcedureWithId,
@@ -491,7 +465,6 @@ impl DdlManager {
handle_create_view_task(self, create_view_task).await
}
DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
CommentOn(comment_on_task) => handle_comment_on_task(self, comment_on_task).await,
#[cfg(feature = "enterprise")]
CreateTrigger(create_trigger_task) => {
handle_create_trigger_task(
@@ -923,26 +896,6 @@ async fn handle_create_view_task(
})
}
async fn handle_comment_on_task(
ddl_manager: &DdlManager,
comment_on_task: CommentOnTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, _) = ddl_manager
.submit_comment_on_task(comment_on_task.clone())
.await?;
let procedure_id = id.to_string();
info!(
"Comment on {}.{}.{} is updated via procedure_id {id:?}",
comment_on_task.catalog_name, comment_on_task.schema_name, comment_on_task.object_name
);
Ok(SubmitDdlTaskResponse {
key: procedure_id.into(),
..Default::default()
})
}
#[cfg(test)]
mod tests {
use std::sync::Arc;

View File

@@ -14,8 +14,6 @@
use std::time::Duration;
use etcd_client::ConnectOptions;
/// Heartbeat interval time (the basic unit of various time constants).
pub const HEARTBEAT_INTERVAL_MILLIS: u64 = 3000;
@@ -43,23 +41,6 @@ pub const POSTGRES_KEEP_ALIVE_SECS: u64 = 30;
/// In a lease, there are two opportunities for renewal.
pub const META_KEEP_ALIVE_INTERVAL_SECS: u64 = META_LEASE_SECS / 2;
/// The timeout of the heartbeat request.
pub const HEARTBEAT_TIMEOUT: Duration = Duration::from_secs(META_KEEP_ALIVE_INTERVAL_SECS + 1);
/// The keep-alive interval of the heartbeat channel.
pub const HEARTBEAT_CHANNEL_KEEP_ALIVE_INTERVAL_SECS: Duration = Duration::from_secs(15);
/// The keep-alive timeout of the heartbeat channel.
pub const HEARTBEAT_CHANNEL_KEEP_ALIVE_TIMEOUT_SECS: Duration = Duration::from_secs(5);
/// The default options for the etcd client.
pub fn default_etcd_client_options() -> ConnectOptions {
ConnectOptions::new()
.with_keep_alive_while_idle(true)
.with_keep_alive(Duration::from_secs(15), Duration::from_secs(5))
.with_connect_timeout(Duration::from_secs(10))
}
/// The default mailbox round-trip timeout.
pub const MAILBOX_RTT_SECS: u64 = 1;

View File

@@ -272,6 +272,13 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to send message: {err_msg}"))]
SendMessage {
err_msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to serde json"))]
SerdeJson {
#[snafu(source)]
@@ -1111,7 +1118,7 @@ impl ErrorExt for Error {
| DeserializeFlexbuffers { .. }
| ConvertTimeRanges { .. } => StatusCode::Unexpected,
GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,
SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,
SchemaAlreadyExists { .. } => StatusCode::DatabaseAlreadyExists,

View File

@@ -23,7 +23,6 @@ use crate::heartbeat::mailbox::{IncomingMessage, MailboxRef};
pub mod invalidate_table_cache;
pub mod parse_mailbox_message;
pub mod suspend;
#[cfg(test)]
mod tests;

View File

@@ -1,69 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use async_trait::async_trait;
use common_telemetry::{info, warn};
use crate::error::Result;
use crate::heartbeat::handler::{
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
use crate::instruction::Instruction;
/// A heartbeat response handler that handles the special "suspend" instruction.
/// It simply sets or clears (if previously set) the inner suspend atomic state.
pub struct SuspendHandler {
suspend: Arc<AtomicBool>,
}
impl SuspendHandler {
pub fn new(suspend: Arc<AtomicBool>) -> Self {
Self { suspend }
}
}
#[async_trait]
impl HeartbeatResponseHandler for SuspendHandler {
fn is_acceptable(&self, context: &HeartbeatResponseHandlerContext) -> bool {
matches!(
context.incoming_message,
Some((_, Instruction::Suspend)) | None
)
}
async fn handle(&self, context: &mut HeartbeatResponseHandlerContext) -> Result<HandleControl> {
let flip_state = |expect: bool| {
self.suspend
.compare_exchange(expect, !expect, Ordering::Relaxed, Ordering::Relaxed)
.is_ok()
};
if let Some((_, Instruction::Suspend)) = context.incoming_message.take() {
if flip_state(false) {
warn!("Suspend instruction received from meta, entering suspension state");
}
} else {
// Suspended components should always try to get out of this state on their own; we don't
// want an explicit "un-suspend" instruction to resume them, as that can be error-prone.
// So if the "suspend" instruction is not found in the heartbeat, just unset the state.
if flip_state(true) {
info!("clear suspend state");
}
}
Ok(HandleControl::Continue)
}
}
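
A standalone sketch (not part of the diff) of the `compare_exchange` toggle the handler relies on: the flip only succeeds when the current value equals `expect`, so repeated identical heartbeats neither spam the log nor race each other:

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

fn main() {
    let suspend = Arc::new(AtomicBool::new(false));
    let flip_state = |expect: bool| {
        suspend
            .compare_exchange(expect, !expect, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
    };

    assert!(flip_state(false));  // false -> true: the first "suspend" flips the state
    assert!(!flip_state(false)); // already true: subsequent "suspend"s are no-ops
    assert!(flip_state(true));   // true -> false: the state is cleared exactly once
}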

View File

@@ -15,8 +15,8 @@
use std::sync::Arc;
use tokio::sync::mpsc::Sender;
use tokio::sync::mpsc::error::SendError;
use crate::error::{self, Result};
use crate::instruction::{Instruction, InstructionReply};
pub type IncomingMessage = (MessageMeta, Instruction);
@@ -51,8 +51,13 @@ impl HeartbeatMailbox {
Self { sender }
}
pub async fn send(&self, message: OutgoingMessage) -> Result<(), SendError<OutgoingMessage>> {
self.sender.send(message).await
pub async fn send(&self, message: OutgoingMessage) -> Result<()> {
self.sender.send(message).await.map_err(|e| {
error::SendMessageSnafu {
err_msg: e.to_string(),
}
.build()
})
}
}
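
A minimal, self-contained sketch (hypothetical error type, not from the diff) of why `send` maps `SendError<OutgoingMessage>` into a string-based error: the generic `SendError` hands the unsent message back, and stringifying it keeps the message type out of the mailbox's error signature:

use tokio::sync::mpsc;

#[derive(Debug)]
struct SendMessageError {
    err_msg: String,
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel::<String>(1);
    drop(rx); // the receiver is gone, so the send below fails

    let result = tx
        .send("hello".to_string())
        .await
        .map_err(|e| SendMessageError { err_msg: e.to_string() });
    assert!(result.is_err());
}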

View File

@@ -339,16 +339,6 @@ pub struct FlushRegions {
pub error_strategy: FlushErrorStrategy,
}
impl Display for FlushRegions {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"FlushRegions(region_ids={:?}, strategy={:?}, error_strategy={:?})",
self.region_ids, self.strategy, self.error_strategy
)
}
}
impl FlushRegions {
/// Creates a synchronous single-region flush.
pub fn sync_single(region_id: RegionId) -> Self {
@@ -539,8 +529,6 @@ pub enum Instruction {
GetFileRefs(GetFileRefs),
/// Triggers garbage collection for a region.
GcRegions(GcRegions),
/// Temporarily suspends serving reads or writes.
Suspend,
}
impl Instruction {

View File

@@ -94,7 +94,7 @@ impl TableInfoValue {
}
}
pub fn update(&self, new_table_info: RawTableInfo) -> Self {
pub(crate) fn update(&self, new_table_info: RawTableInfo) -> Self {
Self {
table_info: new_table_info,
version: self.version + 1,

View File

@@ -34,8 +34,6 @@ pub mod memory;
#[cfg(any(feature = "mysql_kvbackend", feature = "pg_kvbackend"))]
pub mod rds;
pub mod test;
#[cfg(any(test, feature = "testing"))]
pub mod test_util;
pub mod txn;
pub mod util;
pub type KvBackendRef<E = Error> = Arc<dyn KvBackend<Error = E> + Send + Sync>;

View File

@@ -1,125 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::sync::Arc;
use derive_builder::Builder;
use crate::error::Result;
use crate::kv_backend::txn::{Txn, TxnResponse};
use crate::kv_backend::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, KvBackend, PutRequest, PutResponse,
RangeRequest, RangeResponse, TxnService,
};
pub type MockFn<Req, Resp> = Arc<dyn Fn(Req) -> Result<Resp> + Send + Sync>;
/// A mock kv backend for testing.
#[derive(Builder)]
pub struct MockKvBackend {
#[builder(setter(strip_option), default)]
pub range_fn: Option<MockFn<RangeRequest, RangeResponse>>,
#[builder(setter(strip_option), default)]
pub put_fn: Option<MockFn<PutRequest, PutResponse>>,
#[builder(setter(strip_option), default)]
pub batch_put_fn: Option<MockFn<BatchPutRequest, BatchPutResponse>>,
#[builder(setter(strip_option), default)]
pub batch_get_fn: Option<MockFn<BatchGetRequest, BatchGetResponse>>,
#[builder(setter(strip_option), default)]
pub delete_range_fn: Option<MockFn<DeleteRangeRequest, DeleteRangeResponse>>,
#[builder(setter(strip_option), default)]
pub batch_delete_fn: Option<MockFn<BatchDeleteRequest, BatchDeleteResponse>>,
#[builder(setter(strip_option), default)]
pub txn: Option<MockFn<Txn, TxnResponse>>,
#[builder(setter(strip_option), default)]
pub max_txn_ops: Option<usize>,
}
#[async_trait::async_trait]
impl TxnService for MockKvBackend {
type Error = crate::error::Error;
async fn txn(&self, txn: Txn) -> Result<TxnResponse> {
if let Some(f) = &self.txn {
f(txn)
} else {
unimplemented!()
}
}
fn max_txn_ops(&self) -> usize {
self.max_txn_ops.unwrap()
}
}
#[async_trait::async_trait]
impl KvBackend for MockKvBackend {
fn name(&self) -> &str {
"mock_kv_backend"
}
fn as_any(&self) -> &dyn Any {
self
}
async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
if let Some(f) = &self.range_fn {
f(req)
} else {
unimplemented!()
}
}
async fn put(&self, req: PutRequest) -> Result<PutResponse> {
if let Some(f) = &self.put_fn {
f(req)
} else {
unimplemented!()
}
}
async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
if let Some(f) = &self.batch_put_fn {
f(req)
} else {
unimplemented!()
}
}
async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
if let Some(f) = &self.batch_get_fn {
f(req)
} else {
unimplemented!()
}
}
async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
if let Some(f) = &self.delete_range_fn {
f(req)
} else {
unimplemented!()
}
}
async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
if let Some(f) = &self.batch_delete_fn {
f(req)
} else {
unimplemented!()
}
}
}

View File

@@ -23,20 +23,19 @@ use api::v1::alter_database_expr::Kind as PbAlterDatabaseKind;
use api::v1::meta::ddl_task_request::Task;
use api::v1::meta::{
AlterDatabaseTask as PbAlterDatabaseTask, AlterTableTask as PbAlterTableTask,
AlterTableTasks as PbAlterTableTasks, CommentOnTask as PbCommentOnTask,
CreateDatabaseTask as PbCreateDatabaseTask, CreateFlowTask as PbCreateFlowTask,
CreateTableTask as PbCreateTableTask, CreateTableTasks as PbCreateTableTasks,
CreateViewTask as PbCreateViewTask, DdlTaskRequest as PbDdlTaskRequest,
DdlTaskResponse as PbDdlTaskResponse, DropDatabaseTask as PbDropDatabaseTask,
DropFlowTask as PbDropFlowTask, DropTableTask as PbDropTableTask,
DropTableTasks as PbDropTableTasks, DropViewTask as PbDropViewTask, Partition, ProcedureId,
AlterTableTasks as PbAlterTableTasks, CreateDatabaseTask as PbCreateDatabaseTask,
CreateFlowTask as PbCreateFlowTask, CreateTableTask as PbCreateTableTask,
CreateTableTasks as PbCreateTableTasks, CreateViewTask as PbCreateViewTask,
DdlTaskRequest as PbDdlTaskRequest, DdlTaskResponse as PbDdlTaskResponse,
DropDatabaseTask as PbDropDatabaseTask, DropFlowTask as PbDropFlowTask,
DropTableTask as PbDropTableTask, DropTableTasks as PbDropTableTasks,
DropViewTask as PbDropViewTask, Partition, ProcedureId,
TruncateTableTask as PbTruncateTableTask,
};
use api::v1::{
AlterDatabaseExpr, AlterTableExpr, CommentObjectType as PbCommentObjectType, CommentOnExpr,
CreateDatabaseExpr, CreateFlowExpr, CreateTableExpr, CreateViewExpr, DropDatabaseExpr,
DropFlowExpr, DropTableExpr, DropViewExpr, EvalInterval, ExpireAfter, Option as PbOption,
QueryContext as PbQueryContext, TruncateTableExpr,
AlterDatabaseExpr, AlterTableExpr, CreateDatabaseExpr, CreateFlowExpr, CreateTableExpr,
CreateViewExpr, DropDatabaseExpr, DropFlowExpr, DropTableExpr, DropViewExpr, EvalInterval,
ExpireAfter, Option as PbOption, QueryContext as PbQueryContext, TruncateTableExpr,
};
use base64::Engine as _;
use base64::engine::general_purpose;
@@ -79,7 +78,6 @@ pub enum DdlTask {
DropView(DropViewTask),
#[cfg(feature = "enterprise")]
CreateTrigger(trigger::CreateTriggerTask),
CommentOn(CommentOnTask),
}
impl DdlTask {
@@ -202,11 +200,6 @@ impl DdlTask {
view_info,
})
}
/// Creates a [`DdlTask`] to comment on a table, column, or flow.
pub fn new_comment_on(task: CommentOnTask) -> Self {
DdlTask::CommentOn(task)
}
}
impl TryFrom<Task> for DdlTask {
@@ -285,7 +278,6 @@ impl TryFrom<Task> for DdlTask {
.fail()
}
}
Task::CommentOnTask(comment_on) => Ok(DdlTask::CommentOn(comment_on.try_into()?)),
}
}
}
@@ -340,7 +332,6 @@ impl TryFrom<SubmitDdlTaskRequest> for PbDdlTaskRequest {
DdlTask::CreateTrigger(task) => Task::CreateTriggerTask(task.try_into()?),
#[cfg(feature = "enterprise")]
DdlTask::DropTrigger(task) => Task::DropTriggerTask(task.into()),
DdlTask::CommentOn(task) => Task::CommentOnTask(task.into()),
};
Ok(Self {
@@ -1286,119 +1277,6 @@ impl From<DropFlowTask> for PbDropFlowTask {
}
}
/// Represents the ID of the object being commented on (Table or Flow).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum CommentObjectId {
Table(TableId),
Flow(FlowId),
}
/// Comment on table, column, or flow
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct CommentOnTask {
pub catalog_name: String,
pub schema_name: String,
pub object_type: CommentObjectType,
pub object_name: String,
/// Column name (only for Column comments)
pub column_name: Option<String>,
/// Object ID (Table or Flow) for validation and cache invalidation
pub object_id: Option<CommentObjectId>,
pub comment: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum CommentObjectType {
Table,
Column,
Flow,
}
impl CommentOnTask {
pub fn table_ref(&self) -> TableReference<'_> {
TableReference {
catalog: &self.catalog_name,
schema: &self.schema_name,
table: &self.object_name,
}
}
}
// Proto conversions for CommentObjectType
impl From<CommentObjectType> for PbCommentObjectType {
fn from(object_type: CommentObjectType) -> Self {
match object_type {
CommentObjectType::Table => PbCommentObjectType::Table,
CommentObjectType::Column => PbCommentObjectType::Column,
CommentObjectType::Flow => PbCommentObjectType::Flow,
}
}
}
impl TryFrom<i32> for CommentObjectType {
type Error = error::Error;
fn try_from(value: i32) -> Result<Self> {
match value {
0 => Ok(CommentObjectType::Table),
1 => Ok(CommentObjectType::Column),
2 => Ok(CommentObjectType::Flow),
_ => error::InvalidProtoMsgSnafu {
err_msg: format!(
"Invalid CommentObjectType value: {}. Valid values are: 0 (Table), 1 (Column), 2 (Flow)",
value
),
}
.fail(),
}
}
}
// Proto conversions for CommentOnTask
impl TryFrom<PbCommentOnTask> for CommentOnTask {
type Error = error::Error;
fn try_from(pb: PbCommentOnTask) -> Result<Self> {
let comment_on = pb.comment_on.context(error::InvalidProtoMsgSnafu {
err_msg: "expected comment_on",
})?;
Ok(CommentOnTask {
catalog_name: comment_on.catalog_name,
schema_name: comment_on.schema_name,
object_type: comment_on.object_type.try_into()?,
object_name: comment_on.object_name,
column_name: if comment_on.column_name.is_empty() {
None
} else {
Some(comment_on.column_name)
},
comment: if comment_on.comment.is_empty() {
None
} else {
Some(comment_on.comment)
},
object_id: None,
})
}
}
impl From<CommentOnTask> for PbCommentOnTask {
fn from(task: CommentOnTask) -> Self {
let pb_object_type: PbCommentObjectType = task.object_type.into();
PbCommentOnTask {
comment_on: Some(CommentOnExpr {
catalog_name: task.catalog_name,
schema_name: task.schema_name,
object_type: pb_object_type as i32,
object_name: task.object_name,
column_name: task.column_name.unwrap_or_default(),
comment: task.comment.unwrap_or_default(),
}),
}
}
}
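
An illustrative round-trip check (not part of the diff) for the conversions above; it uses only the `CommentObjectType` enum and the `From`/`TryFrom` impls defined in this file:

#[test]
fn comment_object_type_roundtrip_sketch() {
    for ty in [
        CommentObjectType::Table,
        CommentObjectType::Column,
        CommentObjectType::Flow,
    ] {
        let pb: PbCommentObjectType = ty.clone().into();
        let back = CommentObjectType::try_from(pb as i32).unwrap();
        assert_eq!(ty, back);
    }
    // Values outside 0..=2 are rejected with an InvalidProtoMsg error.
    assert!(CommentObjectType::try_from(3).is_err());
}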
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct QueryContext {
pub(crate) current_catalog: String,

View File

@@ -14,7 +14,7 @@
use common_telemetry::{debug, error, info};
use common_wal::config::kafka::common::{
DEFAULT_BACKOFF_CONFIG, DEFAULT_CONNECT_TIMEOUT, KafkaConnectionConfig, KafkaTopicConfig,
DEFAULT_BACKOFF_CONFIG, KafkaConnectionConfig, KafkaTopicConfig,
};
use rskafka::client::error::Error as RsKafkaError;
use rskafka::client::error::ProtocolError::TopicAlreadyExists;
@@ -205,13 +205,11 @@ impl KafkaTopicCreator {
self.partition_client(topic).await.unwrap()
}
}
/// Builds a kafka [Client](rskafka::client::Client).
pub async fn build_kafka_client(connection: &KafkaConnectionConfig) -> Result<Client> {
// Builds a Kafka controller client for creating topics.
let mut builder = ClientBuilder::new(connection.broker_endpoints.clone())
.backoff_config(DEFAULT_BACKOFF_CONFIG)
.connect_timeout(Some(DEFAULT_CONNECT_TIMEOUT));
.backoff_config(DEFAULT_BACKOFF_CONFIG);
if let Some(sasl) = &connection.sasl {
builder = builder.sasl_config(sasl.config.clone().into_sasl_config());
};

View File

@@ -246,6 +246,14 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Loader for {type_name} is not implemented: {reason}"))]
ProcedureLoaderNotImplemented {
#[snafu(implicit)]
location: Location,
type_name: String,
reason: String,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -266,7 +274,8 @@ impl ErrorExt for Error {
Error::ToJson { .. }
| Error::DeleteState { .. }
| Error::FromJson { .. }
| Error::WaitWatcher { .. } => StatusCode::Internal,
| Error::WaitWatcher { .. }
| Error::ProcedureLoaderNotImplemented { .. } => StatusCode::Internal,
Error::RetryTimesExceeded { .. }
| Error::RollbackTimesExceeded { .. }

View File

@@ -331,29 +331,8 @@ impl Runner {
}
match status {
Status::Executing { .. } => {
let prev_state = self.meta.state();
if !matches!(prev_state, ProcedureState::Running) {
info!(
"Set Procedure {}-{} state to running, prev_state: {:?}",
self.procedure.type_name(),
self.meta.id,
prev_state
);
self.meta.set_state(ProcedureState::Running);
}
}
Status::Executing { .. } => {}
Status::Suspended { subprocedures, .. } => {
let prev_state = self.meta.state();
if !matches!(prev_state, ProcedureState::Running) {
info!(
"Set Procedure {}-{} state to running, prev_state: {:?}",
self.procedure.type_name(),
self.meta.id,
prev_state
);
self.meta.set_state(ProcedureState::Running);
}
self.on_suspended(subprocedures).await;
}
Status::Done { output } => {
@@ -414,12 +393,8 @@ impl Runner {
return;
}
if self.procedure.rollback_supported() {
self.meta
.set_state(ProcedureState::prepare_rollback(Arc::new(e)));
} else {
self.meta.set_state(ProcedureState::failed(Arc::new(e)));
}
self.meta
.set_state(ProcedureState::prepare_rollback(Arc::new(e)));
}
}
}
@@ -1105,10 +1080,20 @@ mod tests {
let mut runner = new_runner(meta.clone(), Box::new(fail), procedure_store.clone());
runner.manager_ctx.start();
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_prepare_rollback(), "{state:?}");
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_failed(), "{state:?}");
check_files(&object_store, &procedure_store, ctx.procedure_id, &[]).await;
check_files(
&object_store,
&procedure_store,
ctx.procedure_id,
&["0000000000.rollback"],
)
.await;
}
#[tokio::test]
@@ -1161,8 +1146,6 @@ mod tests {
async move {
if times == 1 {
Err(Error::retry_later(MockError::new(StatusCode::Unexpected)))
} else if times == 2 {
Ok(Status::executing(false))
} else {
Ok(Status::done())
}
@@ -1189,10 +1172,6 @@ mod tests {
let state = runner.meta.state();
assert!(state.is_retrying(), "{state:?}");
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_running(), "{state:?}");
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_done(), "{state:?}");
@@ -1206,86 +1185,6 @@ mod tests {
.await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_execute_on_retry_later_error_with_child() {
common_telemetry::init_default_ut_logging();
let mut times = 0;
let child_id = ProcedureId::random();
let exec_fn = move |_| {
times += 1;
async move {
debug!("times: {}", times);
if times == 1 {
Err(Error::retry_later(MockError::new(StatusCode::Unexpected)))
} else if times == 2 {
let exec_fn = |_| {
async { Err(Error::external(MockError::new(StatusCode::Unexpected))) }
.boxed()
};
let fail = ProcedureAdapter {
data: "fail".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table.region-0"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: None,
};
Ok(Status::Suspended {
subprocedures: vec![ProcedureWithId {
id: child_id,
procedure: Box::new(fail),
}],
persist: true,
})
} else {
Ok(Status::done())
}
}
.boxed()
};
let retry_later = ProcedureAdapter {
data: "retry_later".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: None,
};
let dir = create_temp_dir("retry_later");
let meta = retry_later.new_meta(ROOT_ID);
let ctx = context_without_provider(meta.id);
let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta.clone(), Box::new(retry_later), procedure_store.clone());
runner.manager_ctx.start();
debug!("execute_once 1");
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_retrying(), "{state:?}");
let moved_meta = meta.clone();
tokio::spawn(async move {
moved_meta.child_notify.notify_one();
});
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_running(), "{state:?}");
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_done(), "{state:?}");
assert!(meta.state().is_done());
check_files(
&object_store,
&procedure_store,
ctx.procedure_id,
&["0000000000.step", "0000000001.commit"],
)
.await;
}
#[tokio::test]
async fn test_execute_exceed_max_retry_later() {
let exec_fn =
@@ -1405,7 +1304,7 @@ mod tests {
async fn test_child_error() {
let mut times = 0;
let child_id = ProcedureId::random();
common_telemetry::init_default_ut_logging();
let exec_fn = move |ctx: Context| {
times += 1;
async move {
@@ -1630,7 +1529,7 @@ mod tests {
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_failed(), "{state:?}");
assert!(state.is_prepare_rollback(), "{state:?}");
let procedure_id = runner
.manager_ctx
@@ -1697,6 +1596,11 @@ mod tests {
let state = runner.meta.state();
assert!(state.is_running(), "{state:?}");
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_prepare_rollback(), "{state:?}");
assert!(meta.state().is_prepare_rollback());
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_failed(), "{state:?}");

View File

@@ -46,22 +46,6 @@ pub enum OutputData {
Stream(SendableRecordBatchStream),
}
impl OutputData {
/// Consumes the data into a pretty-printed string.
pub async fn pretty_print(self) -> String {
match self {
OutputData::AffectedRows(x) => {
format!("Affected Rows: {x}")
}
OutputData::RecordBatches(x) => x.pretty_print().unwrap_or_else(|e| e.to_string()),
OutputData::Stream(x) => common_recordbatch::util::collect_batches(x)
.await
.and_then(|x| x.pretty_print())
.unwrap_or_else(|e| e.to_string()),
}
}
}
/// OutputMeta stores meta information produced/generated during the execution
#[derive(Debug, Default)]
pub struct OutputMeta {

View File

@@ -188,13 +188,6 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to align JSON array, reason: {reason}"))]
AlignJsonArray {
reason: String,
#[snafu(implicit)]
location: Location,
},
}
impl ErrorExt for Error {
@@ -210,8 +203,7 @@ impl ErrorExt for Error {
| Error::ToArrowScalar { .. }
| Error::ProjectArrowRecordBatch { .. }
| Error::PhysicalExpr { .. }
| Error::RecordBatchSliceIndexOverflow { .. }
| Error::AlignJsonArray { .. } => StatusCode::Internal,
| Error::RecordBatchSliceIndexOverflow { .. } => StatusCode::Internal,
Error::PollStream { .. } => StatusCode::EngineExecuteQuery,

View File

@@ -18,7 +18,7 @@ pub mod adapter;
pub mod cursor;
pub mod error;
pub mod filter;
pub mod recordbatch;
mod recordbatch;
pub mod util;
use std::fmt;

View File

@@ -20,8 +20,7 @@ use datafusion::arrow::util::pretty::pretty_format_batches;
use datafusion_common::arrow::array::ArrayRef;
use datafusion_common::arrow::compute;
use datafusion_common::arrow::datatypes::{DataType as ArrowDataType, SchemaRef as ArrowSchemaRef};
use datatypes::arrow::array::{Array, AsArray, RecordBatchOptions, StructArray, new_null_array};
use datatypes::extension::json::is_json_extension_type;
use datatypes::arrow::array::{Array, AsArray, RecordBatchOptions};
use datatypes::prelude::DataType;
use datatypes::schema::SchemaRef;
use datatypes::vectors::{Helper, VectorRef};
@@ -31,8 +30,8 @@ use snafu::{OptionExt, ResultExt, ensure};
use crate::DfRecordBatch;
use crate::error::{
self, AlignJsonArraySnafu, ArrowComputeSnafu, ColumnNotExistsSnafu, DataTypesSnafu,
NewDfRecordBatchSnafu, ProjectArrowRecordBatchSnafu, Result,
self, ArrowComputeSnafu, ColumnNotExistsSnafu, DataTypesSnafu, ProjectArrowRecordBatchSnafu,
Result,
};
/// A two-dimensional batch of column-oriented data with a defined schema.
@@ -60,8 +59,6 @@ impl RecordBatch {
// TODO(LFC): Remove the casting here once `Batch` is no longer used.
let arrow_arrays = Self::cast_view_arrays(schema.arrow_schema(), arrow_arrays)?;
let arrow_arrays = maybe_align_json_array_with_schema(schema.arrow_schema(), arrow_arrays)?;
let df_record_batch = DfRecordBatch::try_new(schema.arrow_schema().clone(), arrow_arrays)
.context(error::NewDfRecordBatchSnafu)?;
@@ -330,111 +327,12 @@ pub fn merge_record_batches(schema: SchemaRef, batches: &[RecordBatch]) -> Resul
Ok(RecordBatch::from_df_record_batch(schema, record_batch))
}
/// Aligns a json array `json_array` to the json type `schema_type`. The `schema_type` is often the
/// "largest" json type after some insertions into the table schema, while the json array previously
/// written in the SST could lag behind it. So it's important to "amend" the json array's missing
/// fields with null arrays, aligning the array's data type with the provided one.
///
/// # Panics
///
/// - The json array is not an Arrow [StructArray], or the provided data type `schema_type` is not
/// a Struct type. Neither should happen unless we switch our implementation of how json arrays
/// are physically stored.
pub fn align_json_array(json_array: &ArrayRef, schema_type: &ArrowDataType) -> Result<ArrayRef> {
let json_type = json_array.data_type();
if json_type == schema_type {
return Ok(json_array.clone());
}
let json_array = json_array.as_struct();
let array_fields = json_array.fields();
let array_columns = json_array.columns();
let ArrowDataType::Struct(schema_fields) = schema_type else {
unreachable!()
};
let mut aligned = Vec::with_capacity(schema_fields.len());
// Compare the fields in the json array and the to-be-aligned schema, amending with null arrays
// on the way. It's very important to note that fields in the json array and in the json type
// are both SORTED.
let mut i = 0; // point to the schema fields
let mut j = 0; // point to the array fields
while i < schema_fields.len() && j < array_fields.len() {
let schema_field = &schema_fields[i];
let array_field = &array_fields[j];
if schema_field.name() == array_field.name() {
if matches!(schema_field.data_type(), ArrowDataType::Struct(_)) {
// A `StructArray` inside a json array must be another json array (like a nested json
// object in a json value).
aligned.push(align_json_array(
&array_columns[j],
schema_field.data_type(),
)?);
} else {
aligned.push(array_columns[j].clone());
}
j += 1;
} else {
aligned.push(new_null_array(schema_field.data_type(), json_array.len()));
}
i += 1;
}
if i < schema_fields.len() {
for field in &schema_fields[i..] {
aligned.push(new_null_array(field.data_type(), json_array.len()));
}
}
ensure!(
j == array_fields.len(),
AlignJsonArraySnafu {
reason: format!(
"this json array has more fields {:?}",
array_fields[j..]
.iter()
.map(|x| x.name())
.collect::<Vec<_>>(),
)
}
);
let json_array =
StructArray::try_new(schema_fields.clone(), aligned, json_array.nulls().cloned())
.context(NewDfRecordBatchSnafu)?;
Ok(Arc::new(json_array))
}
fn maybe_align_json_array_with_schema(
schema: &ArrowSchemaRef,
arrays: Vec<ArrayRef>,
) -> Result<Vec<ArrayRef>> {
if schema.fields().iter().all(|f| !is_json_extension_type(f)) {
return Ok(arrays);
}
let mut aligned = Vec::with_capacity(arrays.len());
for (field, array) in schema.fields().iter().zip(arrays.into_iter()) {
if !is_json_extension_type(field) {
aligned.push(array);
continue;
}
let json_array = align_json_array(&array, field.data_type())?;
aligned.push(json_array);
}
Ok(aligned)
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use datatypes::arrow::array::{
AsArray, BooleanArray, Float64Array, Int64Array, ListArray, UInt32Array,
};
use datatypes::arrow::datatypes::{
DataType, Field, Fields, Int64Type, Schema as ArrowSchema, UInt32Type,
};
use datatypes::arrow::array::{AsArray, UInt32Array};
use datatypes::arrow::datatypes::{DataType, Field, Schema as ArrowSchema, UInt32Type};
use datatypes::arrow_array::StringArray;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
@@ -442,165 +340,6 @@ mod tests {
use super::*;
#[test]
fn test_align_json_array() -> Result<()> {
struct TestCase {
json_array: ArrayRef,
schema_type: DataType,
expected: std::result::Result<ArrayRef, String>,
}
impl TestCase {
fn new(
json_array: StructArray,
schema_type: Fields,
expected: std::result::Result<Vec<ArrayRef>, String>,
) -> Self {
Self {
json_array: Arc::new(json_array),
schema_type: DataType::Struct(schema_type.clone()),
expected: expected
.map(|x| Arc::new(StructArray::new(schema_type, x, None)) as ArrayRef),
}
}
fn test(self) -> Result<()> {
let result = align_json_array(&self.json_array, &self.schema_type);
match (result, self.expected) {
(Ok(json_array), Ok(expected)) => assert_eq!(&json_array, &expected),
(Ok(json_array), Err(e)) => {
panic!("expecting error {e} but actually get: {json_array:?}")
}
(Err(e), Err(expected)) => assert_eq!(e.to_string(), expected),
(Err(e), Ok(_)) => return Err(e),
}
Ok(())
}
}
// Test that an empty json array can be aligned with a complex json type.
TestCase::new(
StructArray::new_empty_fields(2, None),
Fields::from(vec![
Field::new("int", DataType::Int64, true),
Field::new_struct(
"nested",
vec![Field::new("bool", DataType::Boolean, true)],
true,
),
Field::new("string", DataType::Utf8, true),
]),
Ok(vec![
Arc::new(Int64Array::new_null(2)) as ArrayRef,
Arc::new(StructArray::new_null(
Fields::from(vec![Arc::new(Field::new("bool", DataType::Boolean, true))]),
2,
)),
Arc::new(StringArray::new_null(2)),
]),
)
.test()?;
// Test simple json array alignment.
TestCase::new(
StructArray::from(vec![(
Arc::new(Field::new("float", DataType::Float64, true)),
Arc::new(Float64Array::from(vec![1.0, 2.0, 3.0])) as ArrayRef,
)]),
Fields::from(vec![
Field::new("float", DataType::Float64, true),
Field::new("string", DataType::Utf8, true),
]),
Ok(vec![
Arc::new(Float64Array::from(vec![1.0, 2.0, 3.0])) as ArrayRef,
Arc::new(StringArray::new_null(3)),
]),
)
.test()?;
// Test complex json array alignment.
TestCase::new(
StructArray::from(vec![
(
Arc::new(Field::new_list(
"list",
Field::new_list_field(DataType::Int64, true),
true,
)),
Arc::new(ListArray::from_iter_primitive::<Int64Type, _, _>(vec![
Some(vec![Some(1)]),
None,
Some(vec![Some(2), Some(3)]),
])) as ArrayRef,
),
(
Arc::new(Field::new_struct(
"nested",
vec![Field::new("int", DataType::Int64, true)],
true,
)),
Arc::new(StructArray::from(vec![(
Arc::new(Field::new("int", DataType::Int64, true)),
Arc::new(Int64Array::from(vec![-1, -2, -3])) as ArrayRef,
)])),
),
(
Arc::new(Field::new("string", DataType::Utf8, true)),
Arc::new(StringArray::from(vec!["a", "b", "c"])),
),
]),
Fields::from(vec![
Field::new("bool", DataType::Boolean, true),
Field::new_list("list", Field::new_list_field(DataType::Int64, true), true),
Field::new_struct(
"nested",
vec![
Field::new("float", DataType::Float64, true),
Field::new("int", DataType::Int64, true),
],
true,
),
Field::new("string", DataType::Utf8, true),
]),
Ok(vec![
Arc::new(BooleanArray::new_null(3)) as ArrayRef,
Arc::new(ListArray::from_iter_primitive::<Int64Type, _, _>(vec![
Some(vec![Some(1)]),
None,
Some(vec![Some(2), Some(3)]),
])),
Arc::new(StructArray::from(vec![
(
Arc::new(Field::new("float", DataType::Float64, true)),
Arc::new(Float64Array::new_null(3)) as ArrayRef,
),
(
Arc::new(Field::new("int", DataType::Int64, true)),
Arc::new(Int64Array::from(vec![-1, -2, -3])),
),
])),
Arc::new(StringArray::from(vec!["a", "b", "c"])),
]),
)
.test()?;
// Test alignment failure.
TestCase::new(
StructArray::try_from(vec![
("i", Arc::new(Int64Array::from(vec![1])) as ArrayRef),
("j", Arc::new(Int64Array::from(vec![2])) as ArrayRef),
])
.unwrap(),
Fields::from(vec![Field::new("i", DataType::Int64, true)]),
Err(
r#"Failed to align JSON array, reason: this json array has more fields ["j"]"#
.to_string(),
),
)
.test()?;
Ok(())
}
#[test]
fn test_record_batch() {
let arrow_schema = Arc::new(ArrowSchema::new(vec![

View File

@@ -231,15 +231,13 @@ pub fn sql_value_to_value(
}
}
let value_datatype = value.data_type();
// The datatype of a json value is determined by its actual data, so we can't simply "cast" it here.
if value_datatype.is_json() || value_datatype == *data_type {
Ok(value)
} else {
if value.data_type() != *data_type {
datatypes::types::cast(value, data_type).with_context(|_| InvalidCastSnafu {
sql_value: sql_val.clone(),
datatype: data_type,
})
} else {
Ok(value)
}
}

View File

@@ -16,7 +16,6 @@ use common_time::timezone::Timezone;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnDefaultConstraint;
use datatypes::schema::constraint::{CURRENT_TIMESTAMP, CURRENT_TIMESTAMP_FN};
use snafu::ensure;
use sqlparser::ast::ValueWithSpan;
pub use sqlparser::ast::{
BinaryOperator, ColumnDef, ColumnOption, ColumnOptionDef, DataType, Expr, Function,
@@ -38,14 +37,6 @@ pub fn parse_column_default_constraint(
.iter()
.find(|o| matches!(o.option, ColumnOption::Default(_)))
{
ensure!(
!data_type.is_json(),
UnsupportedDefaultValueSnafu {
column_name,
reason: "json column cannot have a default value",
}
);
let default_constraint = match &opt.option {
ColumnOption::Default(Expr::Value(v)) => ColumnDefaultConstraint::Value(
sql_value_to_value(column_name, data_type, &v.value, timezone, None, false)?,
@@ -91,7 +82,7 @@ pub fn parse_column_default_constraint(
} else {
return UnsupportedDefaultValueSnafu {
column_name,
reason: format!("expr '{expr}' not supported"),
expr: *expr.clone(),
}
.fail();
}
@@ -99,14 +90,14 @@ pub fn parse_column_default_constraint(
ColumnOption::Default(others) => {
return UnsupportedDefaultValueSnafu {
column_name,
reason: format!("expr '{others}' not supported"),
expr: others.clone(),
}
.fail();
}
_ => {
return UnsupportedDefaultValueSnafu {
column_name,
reason: format!("option '{}' not supported", opt.option),
expr: Expr::Value(SqlValue::Null.into()),
}
.fail();
}

View File

@@ -55,11 +55,13 @@ pub enum Error {
},
#[snafu(display(
"Unsupported default constraint for column: '{column_name}', reason: {reason}"
"Unsupported expr in default constraint: {} for column: {}",
expr,
column_name
))]
UnsupportedDefaultValue {
column_name: String,
reason: String,
expr: Expr,
#[snafu(implicit)]
location: Location,
},

View File

@@ -36,9 +36,6 @@ pub const DEFAULT_BACKOFF_CONFIG: BackoffConfig = BackoffConfig {
deadline: Some(Duration::from_secs(3)),
};
/// The default connect timeout for kafka client.
pub const DEFAULT_CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
/// Default interval for auto WAL pruning.
pub const DEFAULT_AUTO_PRUNE_INTERVAL: Duration = Duration::from_mins(30);
/// Default limit for concurrent auto pruning tasks.

View File

@@ -22,7 +22,6 @@ use common_base::Plugins;
use common_error::ext::BoxedError;
use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
use common_meta::cache::{LayeredCacheRegistry, SchemaCacheRef, TableSchemaCacheRef};
use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::datanode::TopicStatsReporter;
use common_meta::key::runtime_switch::RuntimeSwitchManager;
use common_meta::key::{SchemaMetadataManager, SchemaMetadataManagerRef};
@@ -282,11 +281,21 @@ impl DatanodeBuilder {
open_all_regions.await?;
}
let mut resource_stat = ResourceStatImpl::default();
resource_stat.start_collect_cpu_usage();
let heartbeat_task = if let Some(meta_client) = meta_client {
let task = self
.create_heartbeat_task(&region_server, meta_client, cache_registry)
.await?;
Some(task)
Some(
HeartbeatTask::try_new(
&self.opts,
region_server.clone(),
meta_client,
cache_registry,
self.plugins.clone(),
Arc::new(resource_stat),
)
.await?,
)
} else {
None
};
@@ -315,29 +324,6 @@ impl DatanodeBuilder {
})
}
async fn create_heartbeat_task(
&self,
region_server: &RegionServer,
meta_client: MetaClientRef,
cache_invalidator: CacheInvalidatorRef,
) -> Result<HeartbeatTask> {
let stat = {
let mut stat = ResourceStatImpl::default();
stat.start_collect_cpu_usage();
Arc::new(stat)
};
HeartbeatTask::try_new(
&self.opts,
region_server.clone(),
meta_client,
cache_invalidator,
self.plugins.clone(),
stat,
)
.await
}
/// Builds [ObjectStoreManager] from [StorageConfig].
pub async fn build_object_store_manager(cfg: &StorageConfig) -> Result<ObjectStoreManagerRef> {
let object_store = store::new_object_store(cfg.store.clone(), &cfg.data_home).await?;

View File

@@ -25,7 +25,6 @@ use common_meta::datanode::REGION_STATISTIC_KEY;
use common_meta::distributed_time_constants::META_KEEP_ALIVE_INTERVAL_SECS;
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::suspend::SuspendHandler;
use common_meta::heartbeat::handler::{
HandlerGroupExecutor, HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutorRef,
};
@@ -92,7 +91,6 @@ impl HeartbeatTask {
let resp_handler_executor = Arc::new(HandlerGroupExecutor::new(vec![
region_alive_keeper.clone(),
Arc::new(ParseMailboxMessageHandler),
Arc::new(SuspendHandler::new(region_server.suspend_state())),
Arc::new(
RegionHeartbeatResponseHandler::new(region_server.clone())
.with_open_region_parallelism(opts.init_regions_parallelism),

View File

@@ -99,30 +99,26 @@ impl RegionHeartbeatResponseHandler {
self
}
fn build_handler(
&self,
instruction: &Instruction,
) -> MetaResult<Option<Box<InstructionHandlers>>> {
fn build_handler(&self, instruction: &Instruction) -> MetaResult<Box<InstructionHandlers>> {
match instruction {
Instruction::CloseRegions(_) => Ok(Some(Box::new(CloseRegionsHandler.into()))),
Instruction::OpenRegions(_) => Ok(Some(Box::new(
Instruction::CloseRegions(_) => Ok(Box::new(CloseRegionsHandler.into())),
Instruction::OpenRegions(_) => Ok(Box::new(
OpenRegionsHandler {
open_region_parallelism: self.open_region_parallelism,
}
.into(),
))),
Instruction::FlushRegions(_) => Ok(Some(Box::new(FlushRegionsHandler.into()))),
Instruction::DowngradeRegions(_) => Ok(Some(Box::new(DowngradeRegionsHandler.into()))),
Instruction::UpgradeRegions(_) => Ok(Some(Box::new(
)),
Instruction::FlushRegions(_) => Ok(Box::new(FlushRegionsHandler.into())),
Instruction::DowngradeRegions(_) => Ok(Box::new(DowngradeRegionsHandler.into())),
Instruction::UpgradeRegions(_) => Ok(Box::new(
UpgradeRegionsHandler {
upgrade_region_parallelism: self.open_region_parallelism,
}
.into(),
))),
Instruction::GetFileRefs(_) => Ok(Some(Box::new(GetFileRefsHandler.into()))),
Instruction::GcRegions(_) => Ok(Some(Box::new(GcRegionsHandler.into()))),
)),
Instruction::GetFileRefs(_) => Ok(Box::new(GetFileRefsHandler.into())),
Instruction::GcRegions(_) => Ok(Box::new(GcRegionsHandler.into())),
Instruction::InvalidateCaches(_) => InvalidHeartbeatResponseSnafu.fail(),
Instruction::Suspend => Ok(None),
}
}
}
@@ -220,24 +216,30 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
.context(InvalidHeartbeatResponseSnafu)?;
let mailbox = ctx.mailbox.clone();
if let Some(handler) = self.build_handler(&instruction)? {
let context = HandlerContext {
region_server: self.region_server.clone(),
downgrade_tasks: self.downgrade_tasks.clone(),
flush_tasks: self.flush_tasks.clone(),
gc_tasks: self.gc_tasks.clone(),
};
let _handle = common_runtime::spawn_global(async move {
let reply = handler.handle(&context, instruction).await;
if let Some(reply) = reply
&& let Err(e) = mailbox.send((meta, reply)).await
{
let error = e.to_string();
let (meta, reply) = e.0;
error!("Failed to send reply {reply} to {meta:?}: {error}");
}
});
}
let region_server = self.region_server.clone();
let downgrade_tasks = self.downgrade_tasks.clone();
let flush_tasks = self.flush_tasks.clone();
let gc_tasks = self.gc_tasks.clone();
let handler = self.build_handler(&instruction)?;
let _handle = common_runtime::spawn_global(async move {
let reply = handler
.handle(
&HandlerContext {
region_server,
downgrade_tasks,
flush_tasks,
gc_tasks,
},
instruction,
)
.await;
if let Some(reply) = reply
&& let Err(e) = mailbox.send((meta, reply)).await
{
error!(e; "Failed to send reply to mailbox");
}
});
Ok(HandleControl::Continue)
}

View File

@@ -320,15 +320,4 @@ mod tests {
assert!(flush_reply.results[0].1.is_ok());
assert!(flush_reply.results[1].1.is_err());
}
#[test]
fn test_flush_regions_display() {
let region_id = RegionId::new(1024, 1);
let flush_regions = FlushRegions::sync_single(region_id);
let display = format!("{}", flush_regions);
assert_eq!(
display,
"FlushRegions(region_ids=[4398046511105(1024, 1)], strategy=Sync, error_strategy=FailFast)"
);
}
}

View File

@@ -17,7 +17,6 @@ mod catalog;
use std::collections::HashMap;
use std::fmt::Debug;
use std::ops::Deref;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::time::Duration;
@@ -53,9 +52,7 @@ pub use query::dummy_catalog::{
DummyCatalogList, DummyTableProviderFactory, TableProviderFactoryRef,
};
use serde_json;
use servers::error::{
self as servers_error, ExecuteGrpcRequestSnafu, Result as ServerResult, SuspendedSnafu,
};
use servers::error::{self as servers_error, ExecuteGrpcRequestSnafu, Result as ServerResult};
use servers::grpc::FlightCompression;
use servers::grpc::flight::{FlightCraft, FlightRecordBatchStream, TonicStream};
use servers::grpc::region_server::RegionServerHandler;
@@ -92,7 +89,6 @@ use crate::region_server::catalog::{NameAwareCatalogList, NameAwareDataSourceInj
pub struct RegionServer {
inner: Arc<RegionServerInner>,
flight_compression: FlightCompression,
suspend: Arc<AtomicBool>,
}
pub struct RegionStat {
@@ -140,7 +136,6 @@ impl RegionServer {
),
)),
flight_compression,
suspend: Arc::new(AtomicBool::new(false)),
}
}
@@ -600,14 +595,6 @@ impl RegionServer {
.handle_sync_region(engine_with_status.engine(), region_id, manifest_info)
.await
}
fn is_suspended(&self) -> bool {
self.suspend.load(Ordering::Relaxed)
}
pub(crate) fn suspend_state(&self) -> Arc<AtomicBool> {
self.suspend.clone()
}
}
#[async_trait]
@@ -657,8 +644,6 @@ impl FlightCraft for RegionServer {
&self,
request: Request<Ticket>,
) -> TonicResult<Response<TonicStream<FlightData>>> {
ensure!(!self.is_suspended(), SuspendedSnafu);
let ticket = request.into_inner().ticket;
let request = api::v1::region::QueryRequest::decode(ticket.as_ref())
.context(servers_error::InvalidFlightTicketSnafu)?;
@@ -1215,8 +1200,7 @@ impl RegionServerInner {
| RegionRequest::Flush(_)
| RegionRequest::Compact(_)
| RegionRequest::Truncate(_)
| RegionRequest::BuildIndex(_)
| RegionRequest::EnterStaging(_) => RegionChange::None,
| RegionRequest::BuildIndex(_) => RegionChange::None,
RegionRequest::Catchup(_) => RegionChange::Catchup,
};
@@ -1276,6 +1260,7 @@ impl RegionServerInner {
.with_context(|_| HandleRegionRequestSnafu { region_id })?
.new_opened_logical_region_ids()
else {
warn!("No new opened logical regions");
return Ok(());
};

View File

@@ -24,8 +24,8 @@ use common_query::Output;
use common_runtime::Runtime;
use common_runtime::runtime::{BuilderBuild, RuntimeTrait};
use datafusion::catalog::TableFunction;
use datafusion::dataframe::DataFrame;
use datafusion_expr::{AggregateUDF, LogicalPlan};
use query::dataframe::DataFrame;
use query::planner::LogicalPlanner;
use query::query_engine::{DescribeResult, QueryEngineState};
use query::{QueryEngine, QueryEngineContext};

View File

@@ -15,6 +15,7 @@
use std::fmt;
use std::sync::Arc;
use arrow::compute::cast as arrow_array_cast;
use arrow::datatypes::{
DataType as ArrowDataType, IntervalUnit as ArrowIntervalUnit, TimeUnit as ArrowTimeUnit,
};
@@ -367,10 +368,8 @@ impl ConcreteDataType {
/// Checks if the data type can cast to another data type.
pub fn can_arrow_type_cast_to(&self, to_type: &ConcreteDataType) -> bool {
match (self, to_type) {
(ConcreteDataType::Json(this), ConcreteDataType::Json(that)) => that.is_include(this),
_ => arrow::compute::can_cast_types(&self.as_arrow_type(), &to_type.as_arrow_type()),
}
let array = arrow_array::new_empty_array(&self.as_arrow_type());
arrow_array_cast(array.as_ref(), &to_type.as_arrow_type()).is_ok()
}
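
A standalone sketch (module paths assumed from the `arrow` crate) of the probe the new implementation relies on: casting an empty array of the source type is cheap, and whether it succeeds mirrors what a real cast between the same types would do:

use arrow::array::new_empty_array;
use arrow::compute::cast;
use arrow::datatypes::DataType;

fn can_cast(from: &DataType, to: &DataType) -> bool {
    // An empty array carries only the type, so the cast checks compatibility
    // without touching any data.
    let array = new_empty_array(from);
    cast(array.as_ref(), to).is_ok()
}

fn main() {
    assert!(can_cast(&DataType::Int32, &DataType::Utf8));
    assert!(can_cast(&DataType::Utf8, &DataType::Int64)); // string parsing casts are allowed
}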
/// Try to cast data type as a [`DurationType`].

View File

@@ -15,7 +15,7 @@
use std::sync::Arc;
use arrow_schema::extension::ExtensionType;
use arrow_schema::{ArrowError, DataType, FieldRef};
use arrow_schema::{ArrowError, DataType};
use serde::{Deserialize, Serialize};
use crate::json::JsonStructureSettings;
@@ -102,8 +102,3 @@ impl ExtensionType for JsonExtensionType {
Ok(json)
}
}
/// Check if this field is to be treated as json extension type.
pub fn is_json_extension_type(field: &FieldRef) -> bool {
field.extension_type_name() == Some(JsonExtensionType::NAME)
}

View File

@@ -260,7 +260,7 @@ impl JsonValue {
ConcreteDataType::Json(self.json_type().clone())
}
pub fn json_type(&self) -> &JsonType {
pub(crate) fn json_type(&self) -> &JsonType {
self.json_type.get_or_init(|| self.json_variant.json_type())
}
@@ -268,14 +268,6 @@ impl JsonValue {
matches!(self.json_variant, JsonVariant::Null)
}
/// Check if this JSON value is an empty object.
pub fn is_empty_object(&self) -> bool {
match &self.json_variant {
JsonVariant::Object(object) => object.is_empty(),
_ => false,
}
}
pub(crate) fn as_i64(&self) -> Option<i64> {
match self.json_variant {
JsonVariant::Number(n) => n.as_i64(),

View File

@@ -273,9 +273,8 @@ fn collect_fields(column_schemas: &[ColumnSchema]) -> Result<FieldsAndIndices> {
_ => None,
};
if let Some(extype) = extype {
field
.metadata_mut()
.insert(TYPE_KEY.to_string(), extype.to_string());
let metadata = HashMap::from([(TYPE_KEY.to_string(), extype.to_string())]);
field = field.with_metadata(metadata);
}
fields.push(field);
ensure!(

View File

@@ -20,7 +20,7 @@ mod decimal_type;
mod dictionary_type;
mod duration_type;
mod interval_type;
pub mod json_type;
pub(crate) mod json_type;
mod list_type;
mod null_type;
mod primitive_type;

View File

@@ -18,6 +18,7 @@ use std::str::FromStr;
use std::sync::Arc;
use arrow::datatypes::DataType as ArrowDataType;
use arrow_schema::Fields;
use common_base::bytes::Bytes;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
@@ -35,7 +36,7 @@ use crate::vectors::json::builder::JsonVectorBuilder;
use crate::vectors::{BinaryVectorBuilder, MutableVector};
pub const JSON_TYPE_NAME: &str = "Json";
const JSON_PLAIN_FIELD_NAME: &str = "__json_plain__";
const JSON_PLAIN_FIELD_NAME: &str = "__plain__";
const JSON_PLAIN_FIELD_METADATA_KEY: &str = "is_plain_json";
pub type JsonObjectType = BTreeMap<String, JsonNativeType>;
@@ -58,10 +59,6 @@ pub enum JsonNativeType {
}
impl JsonNativeType {
pub fn is_null(&self) -> bool {
matches!(self, JsonNativeType::Null)
}
pub fn u64() -> Self {
Self::Number(JsonNumberType::U64)
}
@@ -190,7 +187,7 @@ impl JsonType {
}
}
pub fn null() -> Self {
pub(crate) fn empty() -> Self {
Self {
format: JsonFormat::Native(Box::new(JsonNativeType::Null)),
}
@@ -211,7 +208,7 @@ impl JsonType {
}
/// Tries to merge this json type with another, erroring on datatype conflict.
pub fn merge(&mut self, other: &JsonType) -> Result<()> {
pub(crate) fn merge(&mut self, other: &JsonType) -> Result<()> {
match (&self.format, &other.format) {
(JsonFormat::Jsonb, JsonFormat::Jsonb) => Ok(()),
(JsonFormat::Native(this), JsonFormat::Native(that)) => {
@@ -226,8 +223,7 @@ impl JsonType {
}
}
/// Check if it can merge with `other` json type.
pub fn is_mergeable(&self, other: &JsonType) -> bool {
pub(crate) fn is_mergeable(&self, other: &JsonType) -> bool {
match (&self.format, &other.format) {
(JsonFormat::Jsonb, JsonFormat::Jsonb) => true,
(JsonFormat::Native(this), JsonFormat::Native(that)) => {
@@ -236,43 +232,6 @@ impl JsonType {
_ => false,
}
}
/// Check if it includes all fields in `other` json type.
pub fn is_include(&self, other: &JsonType) -> bool {
match (&self.format, &other.format) {
(JsonFormat::Jsonb, JsonFormat::Jsonb) => true,
(JsonFormat::Native(this), JsonFormat::Native(that)) => {
is_include(this.as_ref(), that.as_ref())
}
_ => false,
}
}
}
fn is_include(this: &JsonNativeType, that: &JsonNativeType) -> bool {
fn is_include_object(this: &JsonObjectType, that: &JsonObjectType) -> bool {
for (type_name, that_type) in that {
let Some(this_type) = this.get(type_name) else {
return false;
};
if !is_include(this_type, that_type) {
return false;
}
}
true
}
match (this, that) {
(this, that) if this == that => true,
(JsonNativeType::Array(this), JsonNativeType::Array(that)) => {
is_include(this.as_ref(), that.as_ref())
}
(JsonNativeType::Object(this), JsonNativeType::Object(that)) => {
is_include_object(this, that)
}
(_, JsonNativeType::Null) => true,
_ => false,
}
}
/// A special struct type for denoting a "plain" (not object) json value. It has only one field, with
@@ -358,14 +317,14 @@ impl DataType for JsonType {
fn as_arrow_type(&self) -> ArrowDataType {
match self.format {
JsonFormat::Jsonb => ArrowDataType::Binary,
JsonFormat::Native(_) => self.as_struct_type().as_arrow_type(),
JsonFormat::Native(_) => ArrowDataType::Struct(Fields::empty()),
}
}
fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> {
match &self.format {
match self.format {
JsonFormat::Jsonb => Box::new(BinaryVectorBuilder::with_capacity(capacity)),
JsonFormat::Native(x) => Box::new(JsonVectorBuilder::new(*x.clone(), capacity)),
JsonFormat::Native(_) => Box::new(JsonVectorBuilder::with_capacity(capacity)),
}
}
@@ -377,12 +336,6 @@ impl DataType for JsonType {
}
}
impl Display for JsonType {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.name())
}
}
/// Converts a json type value to string
pub fn jsonb_to_string(val: &[u8]) -> Result<String> {
match jsonb::from_slice(val) {
@@ -413,204 +366,6 @@ mod tests {
use super::*;
use crate::json::JsonStructureSettings;
#[test]
fn test_json_type_include() {
fn test(this: &JsonNativeType, that: &JsonNativeType, expected: bool) {
assert_eq!(is_include(this, that), expected);
}
test(&JsonNativeType::Null, &JsonNativeType::Null, true);
test(&JsonNativeType::Null, &JsonNativeType::Bool, false);
test(&JsonNativeType::Bool, &JsonNativeType::Null, true);
test(&JsonNativeType::Bool, &JsonNativeType::Bool, true);
test(&JsonNativeType::Bool, &JsonNativeType::u64(), false);
test(&JsonNativeType::u64(), &JsonNativeType::Null, true);
test(&JsonNativeType::u64(), &JsonNativeType::u64(), true);
test(&JsonNativeType::u64(), &JsonNativeType::String, false);
test(&JsonNativeType::String, &JsonNativeType::Null, true);
test(&JsonNativeType::String, &JsonNativeType::String, true);
test(
&JsonNativeType::String,
&JsonNativeType::Array(Box::new(JsonNativeType::f64())),
false,
);
test(
&JsonNativeType::Array(Box::new(JsonNativeType::f64())),
&JsonNativeType::Null,
true,
);
test(
&JsonNativeType::Array(Box::new(JsonNativeType::f64())),
&JsonNativeType::Array(Box::new(JsonNativeType::Null)),
true,
);
test(
&JsonNativeType::Array(Box::new(JsonNativeType::f64())),
&JsonNativeType::Array(Box::new(JsonNativeType::f64())),
true,
);
test(
&JsonNativeType::Array(Box::new(JsonNativeType::f64())),
&JsonNativeType::String,
false,
);
test(
&JsonNativeType::Array(Box::new(JsonNativeType::f64())),
&JsonNativeType::Object(JsonObjectType::new()),
false,
);
let simple_json_object = &JsonNativeType::Object(JsonObjectType::from([(
"foo".to_string(),
JsonNativeType::String,
)]));
test(simple_json_object, &JsonNativeType::Null, true);
test(simple_json_object, simple_json_object, true);
test(simple_json_object, &JsonNativeType::i64(), false);
test(
simple_json_object,
&JsonNativeType::Object(JsonObjectType::from([(
"bar".to_string(),
JsonNativeType::i64(),
)])),
false,
);
let complex_json_object = &JsonNativeType::Object(JsonObjectType::from([
(
"nested".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"a".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"b".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"c".to_string(),
JsonNativeType::String,
)])),
)])),
)])),
),
("bar".to_string(), JsonNativeType::i64()),
]));
test(complex_json_object, &JsonNativeType::Null, true);
test(complex_json_object, &JsonNativeType::String, false);
test(complex_json_object, complex_json_object, true);
test(
complex_json_object,
&JsonNativeType::Object(JsonObjectType::from([(
"bar".to_string(),
JsonNativeType::i64(),
)])),
true,
);
test(
complex_json_object,
&JsonNativeType::Object(JsonObjectType::from([
(
"nested".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"a".to_string(),
JsonNativeType::Null,
)])),
),
("bar".to_string(), JsonNativeType::i64()),
])),
true,
);
test(
complex_json_object,
&JsonNativeType::Object(JsonObjectType::from([
(
"nested".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"a".to_string(),
JsonNativeType::String,
)])),
),
("bar".to_string(), JsonNativeType::i64()),
])),
false,
);
test(
complex_json_object,
&JsonNativeType::Object(JsonObjectType::from([
(
"nested".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"a".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"b".to_string(),
JsonNativeType::String,
)])),
)])),
),
("bar".to_string(), JsonNativeType::i64()),
])),
false,
);
test(
complex_json_object,
&JsonNativeType::Object(JsonObjectType::from([
(
"nested".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"a".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"b".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"c".to_string(),
JsonNativeType::Null,
)])),
)])),
)])),
),
("bar".to_string(), JsonNativeType::i64()),
])),
true,
);
test(
complex_json_object,
&JsonNativeType::Object(JsonObjectType::from([
(
"nested".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"a".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"b".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"c".to_string(),
JsonNativeType::Bool,
)])),
)])),
)])),
),
("bar".to_string(), JsonNativeType::i64()),
])),
false,
);
test(
complex_json_object,
&JsonNativeType::Object(JsonObjectType::from([(
"nested".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"a".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"b".to_string(),
JsonNativeType::Object(JsonObjectType::from([(
"c".to_string(),
JsonNativeType::String,
)])),
)])),
)])),
)])),
true,
);
}
#[test]
fn test_merge_json_type() -> Result<()> {
fn test(

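The test above exercises an inclusion relation over JSON native types: any type includes Null, scalars include only themselves, arrays are compared element-wise, and an object includes another object when every field of the latter is included in the field of the same name in the former. The following is a minimal, self-contained sketch of such a check, using a simplified stand-in enum (JsonTy) rather than the crate's actual JsonNativeType/JsonObjectType; it only illustrates the semantics the assertions imply.
use std::collections::BTreeMap;
#[derive(PartialEq)]
enum JsonTy {
    Null,
    Bool,
    Number,
    String,
    Array(Box<JsonTy>),
    Object(BTreeMap<String, JsonTy>),
}
// `this` includes `that` if a value of type `that` can be stored in a column of type `this`.
fn is_include(this: &JsonTy, that: &JsonTy) -> bool {
    match (this, that) {
        // Every type can hold a JSON null.
        (_, JsonTy::Null) => true,
        // Arrays are compared element-wise.
        (JsonTy::Array(a), JsonTy::Array(b)) => is_include(a, b),
        // An object includes another object when each of the latter's fields
        // is included in the field of the same name.
        (JsonTy::Object(a), JsonTy::Object(b)) => b
            .iter()
            .all(|(k, v)| a.get(k).is_some_and(|t| is_include(t, v))),
        // Scalars must match exactly.
        (a, b) => a == b,
    }
}
fn main() {
    let obj = JsonTy::Object(BTreeMap::from([("foo".to_string(), JsonTy::String)]));
    assert!(is_include(&obj, &JsonTy::Null));
    assert!(!is_include(&JsonTy::Null, &JsonTy::Bool));
    assert!(is_include(
        &JsonTy::Array(Box::new(JsonTy::Number)),
        &JsonTy::Array(Box::new(JsonTy::Null))
    ));
}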
View File

@@ -20,7 +20,6 @@ use crate::data_type::ConcreteDataType;
use crate::error::{Result, TryFromValueSnafu, UnsupportedOperationSnafu};
use crate::json::value::JsonValueRef;
use crate::prelude::{ValueRef, Vector, VectorRef};
use crate::types::json_type::JsonNativeType;
use crate::types::{JsonType, json_type};
use crate::value::StructValueRef;
use crate::vectors::{MutableVector, StructVectorBuilder};
@@ -182,9 +181,9 @@ pub(crate) struct JsonVectorBuilder {
}
impl JsonVectorBuilder {
pub(crate) fn new(json_type: JsonNativeType, capacity: usize) -> Self {
pub(crate) fn with_capacity(capacity: usize) -> Self {
Self {
merged_type: JsonType::new_native(json_type),
merged_type: JsonType::empty(),
capacity,
builders: vec![],
}
@@ -327,18 +326,18 @@ mod tests {
"Failed to merge JSON datatype: datatypes have conflict, this: Number(I64), that: Array[Bool]",
),
];
let mut builder = JsonVectorBuilder::new(JsonNativeType::Null, 1);
let mut builder = JsonVectorBuilder::with_capacity(1);
for (json, result) in jsons.into_iter().zip(results.into_iter()) {
push(json, &mut builder, result);
}
let vector = builder.to_vector();
let expected = r#"
+---------------------+
| StructVector |
+---------------------+
| {__json_plain__: 1} |
| {__json_plain__: 2} |
+---------------------+"#;
+----------------+
| StructVector |
+----------------+
| {__plain__: 1} |
| {__plain__: 2} |
+----------------+"#;
assert_eq!(pretty_print(vector), expected.trim());
Ok(())
}
@@ -387,7 +386,7 @@ mod tests {
"object": {"timestamp": 1761523203000}
}"#,
];
let mut builder = JsonVectorBuilder::new(JsonNativeType::Null, 1);
let mut builder = JsonVectorBuilder::with_capacity(1);
for json in jsons {
push(json, &mut builder, Ok(()));
}

View File

@@ -379,8 +379,10 @@ impl MutableVector for StructVectorBuilder {
},
StructValueRef::Ref(val) => self.push_struct_value(val)?,
StructValueRef::RefList { val, fields } => {
let struct_value =
StructValue::try_new(val.into_iter().map(Value::from).collect(), fields)?;
let struct_value = StructValue::try_new(
val.iter().map(|v| Value::from(v.clone())).collect(),
fields.clone(),
)?;
self.push_struct_value(&struct_value)?;
}
}
@@ -427,17 +429,12 @@ impl ScalarVectorBuilder for StructVectorBuilder {
.value_builders
.iter_mut()
.map(|b| b.to_vector().to_arrow_array())
.collect::<Vec<_>>();
let struct_array = if arrays.is_empty() {
StructArray::new_empty_fields(self.len(), self.null_buffer.finish())
} else {
StructArray::new(
self.fields.as_arrow_fields(),
arrays,
self.null_buffer.finish(),
)
};
.collect();
let struct_array = StructArray::new(
self.fields.as_arrow_fields(),
arrays,
self.null_buffer.finish(),
);
StructVector::try_new(self.fields.clone(), struct_array).unwrap()
}

View File

@@ -17,7 +17,7 @@
mod relation;
use api::helper::{pb_value_to_value_ref, to_grpc_value};
use api::helper::{pb_value_to_value_ref, value_to_grpc_value};
use api::v1::Row as ProtoRow;
use datatypes::data_type::ConcreteDataType;
use datatypes::types::cast;
@@ -201,7 +201,11 @@ impl From<ProtoRow> for Row {
impl From<Row> for ProtoRow {
fn from(row: Row) -> Self {
let values = row.unpack().into_iter().map(to_grpc_value).collect_vec();
let values = row
.unpack()
.into_iter()
.map(value_to_grpc_value)
.collect_vec();
ProtoRow { values }
}
}

View File

@@ -17,7 +17,6 @@ arc-swap = "1.0"
async-stream.workspace = true
async-trait.workspace = true
auth.workspace = true
axum.workspace = true
bytes.workspace = true
cache.workspace = true
catalog.workspace = true
@@ -86,9 +85,6 @@ common-test-util.workspace = true
datanode.workspace = true
datatypes.workspace = true
futures.workspace = true
hyper-util = { workspace = true, features = ["tokio"] }
meta-srv.workspace = true
reqwest.workspace = true
serde_json.workspace = true
strfmt = "0.2"
tower.workspace = true

View File

@@ -364,12 +364,6 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Service suspended"))]
Suspended {
#[snafu(implicit)]
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -450,8 +444,6 @@ impl ErrorExt for Error {
Error::StatementTimeout { .. } => StatusCode::Cancelled,
Error::AcquireLimiter { .. } => StatusCode::Internal,
Error::Suspended { .. } => StatusCode::Suspended,
}
}

View File

@@ -141,43 +141,7 @@ impl Frontend {
#[cfg(test)]
mod tests {
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;
use api::v1::meta::heartbeat_server::HeartbeatServer;
use api::v1::meta::mailbox_message::Payload;
use api::v1::meta::{
AskLeaderRequest, AskLeaderResponse, HeartbeatRequest, HeartbeatResponse, MailboxMessage,
Peer, ResponseHeader, Role, heartbeat_server,
};
use async_trait::async_trait;
use client::{Client, Database};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::ErrorExt;
use common_error::from_header_to_err_code_msg;
use common_error::status_code::StatusCode;
use common_grpc::channel_manager::ChannelManager;
use common_meta::distributed_time_constants::FRONTEND_HEARTBEAT_INTERVAL_MILLIS;
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::suspend::SuspendHandler;
use common_meta::instruction::Instruction;
use common_stat::ResourceStatImpl;
use meta_client::MetaClientRef;
use meta_client::client::MetaClientBuilder;
use meta_srv::service::GrpcStream;
use servers::grpc::{FlightCompression, GRPC_SERVER};
use servers::http::HTTP_SERVER;
use servers::http::result::greptime_result_v1::GreptimedbV1Response;
use tokio::sync::mpsc;
use tonic::codec::CompressionEncoding;
use tonic::codegen::tokio_stream::StreamExt;
use tonic::codegen::tokio_stream::wrappers::ReceiverStream;
use tonic::{Request, Response, Status, Streaming};
use super::*;
use crate::instance::builder::FrontendBuilder;
use crate::server::Services;
#[test]
fn test_toml() {
@@ -185,277 +149,4 @@ mod tests {
let toml_string = toml::to_string(&opts).unwrap();
let _parsed: FrontendOptions = toml::from_str(&toml_string).unwrap();
}
struct SuspendableHeartbeatServer {
suspend: Arc<AtomicBool>,
}
#[async_trait]
impl heartbeat_server::Heartbeat for SuspendableHeartbeatServer {
type HeartbeatStream = GrpcStream<HeartbeatResponse>;
async fn heartbeat(
&self,
request: Request<Streaming<HeartbeatRequest>>,
) -> std::result::Result<Response<Self::HeartbeatStream>, Status> {
let (tx, rx) = mpsc::channel(4);
common_runtime::spawn_global({
let mut requests = request.into_inner();
let suspend = self.suspend.clone();
async move {
while let Some(request) = requests.next().await {
if let Err(e) = request {
let _ = tx.send(Err(e)).await;
return;
}
let mailbox_message =
suspend.load(Ordering::Relaxed).then(|| MailboxMessage {
payload: Some(Payload::Json(
serde_json::to_string(&Instruction::Suspend).unwrap(),
)),
..Default::default()
});
let response = HeartbeatResponse {
header: Some(ResponseHeader::success()),
mailbox_message,
..Default::default()
};
let _ = tx.send(Ok(response)).await;
}
}
});
Ok(Response::new(Box::pin(ReceiverStream::new(rx))))
}
async fn ask_leader(
&self,
_: Request<AskLeaderRequest>,
) -> std::result::Result<Response<AskLeaderResponse>, Status> {
Ok(Response::new(AskLeaderResponse {
header: Some(ResponseHeader::success()),
leader: Some(Peer {
addr: "localhost:0".to_string(),
..Default::default()
}),
}))
}
}
async fn create_meta_client(
options: &MetaClientOptions,
heartbeat_server: Arc<SuspendableHeartbeatServer>,
) -> MetaClientRef {
let (client, server) = tokio::io::duplex(1024);
// create the heartbeat server:
common_runtime::spawn_global(async move {
let mut router = tonic::transport::Server::builder();
let router = router.add_service(
HeartbeatServer::from_arc(heartbeat_server)
.accept_compressed(CompressionEncoding::Zstd)
.send_compressed(CompressionEncoding::Zstd),
);
router
.serve_with_incoming(futures::stream::iter([Ok::<_, std::io::Error>(server)]))
.await
});
// Move client to an option so we can _move_ the inner value
// on the first attempt to connect. All other attempts will fail.
let mut client = Some(client);
let connector = tower::service_fn(move |_| {
let client = client.take();
async move {
if let Some(client) = client {
Ok(hyper_util::rt::TokioIo::new(client))
} else {
Err(std::io::Error::other("client already taken"))
}
}
});
let manager = ChannelManager::new();
manager
.reset_with_connector("localhost:0", connector)
.unwrap();
// create the heartbeat client:
let mut client = MetaClientBuilder::new(0, Role::Frontend)
.enable_heartbeat()
.heartbeat_channel_manager(manager)
.build();
client.start(&options.metasrv_addrs).await.unwrap();
Arc::new(client)
}
async fn create_frontend(
options: &FrontendOptions,
meta_client: MetaClientRef,
) -> Result<Frontend> {
let instance = Arc::new(
FrontendBuilder::new_test(options, meta_client.clone())
.try_build()
.await?,
);
let servers =
Services::new(options.clone(), instance.clone(), Default::default()).build()?;
let executor = Arc::new(HandlerGroupExecutor::new(vec![
Arc::new(ParseMailboxMessageHandler),
Arc::new(SuspendHandler::new(instance.suspend_state())),
]));
let heartbeat_task = Some(HeartbeatTask::new(
options,
meta_client,
executor,
Arc::new(ResourceStatImpl::default()),
));
let mut frontend = Frontend {
instance,
servers,
heartbeat_task,
};
frontend.start().await?;
Ok(frontend)
}
async fn verify_suspend_state_by_http(
frontend: &Frontend,
expected: std::result::Result<&str, (StatusCode, &str)>,
) {
let addr = frontend.server_handlers().addr(HTTP_SERVER).unwrap();
let response = reqwest::get(format!("http://{}/v1/sql?sql=SELECT 1", addr))
.await
.unwrap();
let headers = response.headers();
let response = if let Some((code, error)) = from_header_to_err_code_msg(headers) {
Err((code, error))
} else {
Ok(response.text().await.unwrap())
};
match (response, expected) {
(Ok(response), Ok(expected)) => {
let response: GreptimedbV1Response = serde_json::from_str(&response).unwrap();
let response = serde_json::to_string(response.output()).unwrap();
assert_eq!(&response, expected);
}
(Err(actual), Err(expected)) => assert_eq!(actual, expected),
_ => unreachable!(),
}
}
async fn verify_suspend_state_by_grpc(
frontend: &Frontend,
expected: std::result::Result<&str, (StatusCode, &str)>,
) {
let addr = frontend.server_handlers().addr(GRPC_SERVER).unwrap();
let client = Client::with_urls([addr.to_string()]);
let client = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
let response = client.sql("SELECT 1").await;
match (response, expected) {
(Ok(response), Ok(expected)) => {
let response = response.data.pretty_print().await;
assert_eq!(&response, expected.trim());
}
(Err(actual), Err(expected)) => {
assert_eq!(actual.status_code(), expected.0);
assert_eq!(actual.output_msg(), expected.1);
}
_ => unreachable!(),
}
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_suspend_frontend() -> Result<()> {
common_telemetry::init_default_ut_logging();
let meta_client_options = MetaClientOptions {
metasrv_addrs: vec!["localhost:0".to_string()],
..Default::default()
};
let options = FrontendOptions {
http: HttpOptions {
addr: "127.0.0.1:0".to_string(),
..Default::default()
},
grpc: GrpcOptions {
bind_addr: "127.0.0.1:0".to_string(),
flight_compression: FlightCompression::None,
..Default::default()
},
mysql: MysqlOptions {
enable: false,
..Default::default()
},
postgres: PostgresOptions {
enable: false,
..Default::default()
},
meta_client: Some(meta_client_options.clone()),
..Default::default()
};
let server = Arc::new(SuspendableHeartbeatServer {
suspend: Arc::new(AtomicBool::new(false)),
});
let meta_client = create_meta_client(&meta_client_options, server.clone()).await;
let frontend = create_frontend(&options, meta_client).await?;
tokio::time::sleep(Duration::from_millis(FRONTEND_HEARTBEAT_INTERVAL_MILLIS)).await;
// initial state: not suspended:
assert!(!frontend.instance.is_suspended());
verify_suspend_state_by_http(&frontend, Ok(r#"[{"records":{"schema":{"column_schemas":[{"name":"Int64(1)","data_type":"Int64"}]},"rows":[[1]],"total_rows":1}}]"#)).await;
verify_suspend_state_by_grpc(
&frontend,
Ok(r#"
+----------+
| Int64(1) |
+----------+
| 1 |
+----------+"#),
)
.await;
// make the heartbeat server return the "suspend" instruction,
server.suspend.store(true, Ordering::Relaxed);
tokio::time::sleep(Duration::from_millis(FRONTEND_HEARTBEAT_INTERVAL_MILLIS)).await;
// ... then the frontend is suspended:
assert!(frontend.instance.is_suspended());
verify_suspend_state_by_http(
&frontend,
Err((
StatusCode::Suspended,
"error: Service suspended, execution_time_ms: 0",
)),
)
.await;
verify_suspend_state_by_grpc(&frontend, Err((StatusCode::Suspended, "Service suspended")))
.await;
// make the heartbeat server NOT return the "suspend" instruction,
server.suspend.store(false, Ordering::Relaxed);
tokio::time::sleep(Duration::from_millis(FRONTEND_HEARTBEAT_INTERVAL_MILLIS)).await;
// ... then the frontend's suspend state is cleared:
assert!(!frontend.instance.is_suspended());
verify_suspend_state_by_http(&frontend, Ok(r#"[{"records":{"schema":{"column_schemas":[{"name":"Int64(1)","data_type":"Int64"}]},"rows":[[1]],"total_rows":1}}]"#)).await;
verify_suspend_state_by_grpc(
&frontend,
Ok(r#"
+----------+
| Int64(1) |
+----------+
| 1 |
+----------+"#),
)
.await;
Ok(())
}
}

View File

@@ -27,6 +27,7 @@ use common_stat::ResourceStatRef;
use common_telemetry::{debug, error, info, warn};
use meta_client::client::{HeartbeatSender, HeartbeatStream, MetaClient};
use servers::addrs;
use servers::heartbeat_options::HeartbeatOptions;
use snafu::ResultExt;
use tokio::sync::mpsc;
use tokio::sync::mpsc::Receiver;
@@ -53,6 +54,7 @@ impl HeartbeatTask {
pub fn new(
opts: &FrontendOptions,
meta_client: Arc<MetaClient>,
heartbeat_opts: HeartbeatOptions,
resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
resource_stat: ResourceStatRef,
) -> Self {
@@ -66,8 +68,8 @@ impl HeartbeatTask {
addrs::resolve_addr(&opts.grpc.bind_addr, Some(&opts.grpc.server_addr))
},
meta_client,
report_interval: opts.heartbeat.interval,
retry_interval: opts.heartbeat.retry_interval,
report_interval: heartbeat_opts.interval,
retry_interval: heartbeat_opts.retry_interval,
resp_handler_executor,
start_time_ms: common_time::util::current_time_millis() as u64,
resource_stat,
@@ -194,8 +196,7 @@ impl HeartbeatTask {
let report_interval = self.report_interval;
let start_time_ms = self.start_time_ms;
let self_peer = Some(Peer {
// The node id will actually be calculated from its address (by hashing the address
// string) in the metasrv. So it can be set to 0 here, as a placeholder.
// The peer id doesn't make sense for the frontend, so we just set it to 0.
id: 0,
addr: self.peer_addr.clone(),
});

View File

@@ -26,8 +26,7 @@ mod region_query;
pub mod standalone;
use std::pin::Pin;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, atomic};
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use async_stream::stream;
@@ -84,7 +83,6 @@ use snafu::prelude::*;
use sql::ast::ObjectNamePartExt;
use sql::dialect::Dialect;
use sql::parser::{ParseOptions, ParserContext};
use sql::statements::comment::CommentObject;
use sql::statements::copy::{CopyDatabase, CopyTable};
use sql::statements::statement::Statement;
use sql::statements::tql::Tql;
@@ -121,7 +119,6 @@ pub struct Instance {
limiter: Option<LimiterRef>,
process_manager: ProcessManagerRef,
slow_query_options: SlowQueryOptions,
suspend: Arc<AtomicBool>,
// cache for otlp metrics
// first layer key: db-string
@@ -174,14 +171,6 @@ impl Instance {
pub fn procedure_executor(&self) -> &ProcedureExecutorRef {
self.statement_executor.procedure_executor()
}
pub fn suspend_state(&self) -> Arc<AtomicBool> {
self.suspend.clone()
}
pub(crate) fn is_suspended(&self) -> bool {
self.suspend.load(atomic::Ordering::Relaxed)
}
}
fn parse_stmt(sql: &str, dialect: &(dyn Dialect + Send + Sync)) -> Result<Vec<Statement>> {
@@ -524,10 +513,6 @@ impl SqlQueryHandler for Instance {
#[tracing::instrument(skip_all)]
async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
if self.is_suspended() {
return vec![error::SuspendedSnafu {}.fail()];
}
let query_interceptor_opt = self.plugins.get::<SqlQueryInterceptorRef<Error>>();
let query_interceptor = query_interceptor_opt.as_ref();
let query = match query_interceptor.pre_parsing(query, query_ctx.clone()) {
@@ -595,8 +580,6 @@ impl SqlQueryHandler for Instance {
plan: LogicalPlan,
query_ctx: QueryContextRef,
) -> Result<Output> {
ensure!(!self.is_suspended(), error::SuspendedSnafu);
if should_capture_statement(stmt.as_ref()) {
// It's safe to unwrap here because we've already checked the type.
let stmt = stmt.unwrap();
@@ -658,10 +641,6 @@ impl SqlQueryHandler for Instance {
query: &PromQuery,
query_ctx: QueryContextRef,
) -> Vec<Result<Output>> {
if self.is_suspended() {
return vec![error::SuspendedSnafu {}.fail()];
}
// the check will be done in the prometheus handler's do_query
let result = PrometheusHandler::do_query(self, query, query_ctx)
.await
@@ -676,8 +655,6 @@ impl SqlQueryHandler for Instance {
stmt: Statement,
query_ctx: QueryContextRef,
) -> Result<Option<DescribeResult>> {
ensure!(!self.is_suspended(), error::SuspendedSnafu);
if matches!(
stmt,
Statement::Insert(_) | Statement::Query(_) | Statement::Delete(_)
@@ -898,7 +875,7 @@ pub fn check_permission(
validate_param(&stmt.table_name, query_ctx)?;
}
Statement::ShowCreateFlow(stmt) => {
validate_flow(&stmt.flow_name, query_ctx)?;
validate_param(&stmt.flow_name, query_ctx)?;
}
#[cfg(feature = "enterprise")]
Statement::ShowCreateTrigger(stmt) => {
@@ -931,12 +908,6 @@ pub fn check_permission(
// show charset and show collation won't be checked
Statement::ShowCharset(_) | Statement::ShowCollation(_) => {}
Statement::Comment(comment) => match &comment.object {
CommentObject::Table(table) => validate_param(table, query_ctx)?,
CommentObject::Column { table, .. } => validate_param(table, query_ctx)?,
CommentObject::Flow(flow) => validate_flow(flow, query_ctx)?,
},
Statement::Insert(insert) => {
let name = insert.table_name().context(ParseSqlSnafu)?;
validate_param(name, query_ctx)?;
@@ -1022,27 +993,6 @@ fn validate_param(name: &ObjectName, query_ctx: &QueryContextRef) -> Result<()>
.context(SqlExecInterceptedSnafu)
}
fn validate_flow(name: &ObjectName, query_ctx: &QueryContextRef) -> Result<()> {
let catalog = match &name.0[..] {
[_flow] => query_ctx.current_catalog().to_string(),
[catalog, _flow] => catalog.to_string_unquoted(),
_ => {
return InvalidSqlSnafu {
err_msg: format!(
"expect flow name to be <catalog>.<flow_name> or <flow_name>, actual: {name}",
),
}
.fail();
}
};
let schema = query_ctx.current_schema();
validate_catalog_and_schema(&catalog, &schema, query_ctx)
.map_err(BoxedError::new)
.context(SqlExecInterceptedSnafu)
}
fn validate_database(name: &ObjectName, query_ctx: &QueryContextRef) -> Result<()> {
let (catalog, schema) = match &name.0[..] {
[schema] => (
@@ -1301,28 +1251,6 @@ mod tests {
// test describe table
let sql = "DESC TABLE {catalog}{schema}demo;";
replace_test(sql, plugins.clone(), &query_ctx);
let comment_flow_cases = [
("COMMENT ON FLOW my_flow IS 'comment';", true),
("COMMENT ON FLOW greptime.my_flow IS 'comment';", true),
("COMMENT ON FLOW wrongcatalog.my_flow IS 'comment';", false),
];
for (sql, is_ok) in comment_flow_cases {
let stmt = &parse_stmt(sql, &GreptimeDbDialect {}).unwrap()[0];
let result = check_permission(plugins.clone(), stmt, &query_ctx);
assert_eq!(result.is_ok(), is_ok);
}
let show_flow_cases = [
("SHOW CREATE FLOW my_flow;", true),
("SHOW CREATE FLOW greptime.my_flow;", true),
("SHOW CREATE FLOW wrongcatalog.my_flow;", false),
];
for (sql, is_ok) in show_flow_cases {
let stmt = &parse_stmt(sql, &GreptimeDbDialect {}).unwrap()[0];
let result = check_permission(plugins.clone(), stmt, &query_ctx);
assert_eq!(result.is_ok(), is_ok);
}
replace_test(sql, plugins, &query_ctx);
}
}

View File

@@ -13,7 +13,6 @@
// limitations under the License.
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use cache::{TABLE_FLOWNODE_SET_CACHE_NAME, TABLE_ROUTE_CACHE_NAME};
use catalog::CatalogManagerRef;
@@ -33,18 +32,15 @@ use operator::flow::FlowServiceOperator;
use operator::insert::Inserter;
use operator::procedure::ProcedureServiceOperator;
use operator::request::Requester;
use operator::statement::{
ExecutorConfigureContext, StatementExecutor, StatementExecutorConfiguratorRef,
StatementExecutorRef,
};
use operator::statement::{StatementExecutor, StatementExecutorRef};
use operator::table::TableMutationOperator;
use partition::manager::PartitionRuleManager;
use pipeline::pipeline_operator::PipelineOperator;
use query::QueryEngineFactory;
use query::region_query::RegionQueryHandlerFactoryRef;
use snafu::{OptionExt, ResultExt};
use snafu::OptionExt;
use crate::error::{self, ExternalSnafu, Result};
use crate::error::{self, Result};
use crate::events::EventHandlerImpl;
use crate::frontend::FrontendOptions;
use crate::instance::Instance;
@@ -88,33 +84,6 @@ impl FrontendBuilder {
}
}
#[cfg(test)]
pub(crate) fn new_test(
options: &FrontendOptions,
meta_client: meta_client::MetaClientRef,
) -> Self {
let kv_backend = Arc::new(common_meta::kv_backend::memory::MemoryKvBackend::new());
let layered_cache_registry = Arc::new(
common_meta::cache::LayeredCacheRegistryBuilder::default()
.add_cache_registry(cache::build_fundamental_cache_registry(kv_backend.clone()))
.build(),
);
Self::new(
options.clone(),
kv_backend,
layered_cache_registry,
catalog::memory::MemoryCatalogManager::with_default_setup(),
Arc::new(client::client_manager::NodeClients::default()),
meta_client,
Arc::new(catalog::process_manager::ProcessManager::new(
"".to_string(),
None,
)),
)
}
pub fn with_local_cache_invalidator(self, cache_invalidator: CacheInvalidatorRef) -> Self {
Self {
local_cache_invalidator: Some(cache_invalidator),
@@ -218,15 +187,10 @@ impl FrontendBuilder {
Some(process_manager.clone()),
);
#[cfg(feature = "enterprise")]
let statement_executor =
if let Some(configurator) = plugins.get::<StatementExecutorConfiguratorRef>() {
let ctx = ExecutorConfigureContext {
kv_backend: kv_backend.clone(),
};
configurator
.configure(statement_executor, ctx)
.await
.context(ExternalSnafu)?
if let Some(factory) = plugins.get::<operator::statement::TriggerQuerierFactoryRef>() {
statement_executor.with_trigger_querier(factory.create(kv_backend.clone()))
} else {
statement_executor
};
@@ -270,7 +234,6 @@ impl FrontendBuilder {
process_manager,
otlp_metrics_table_legacy_cache: DashMap::new(),
slow_query_options: self.options.slow_query.clone(),
suspend: Arc::new(AtomicBool::new(false)),
})
}
}

View File

@@ -12,9 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::pin::Pin;
use std::sync::Arc;
use std::time::Instant;
use api::helper::from_pb_time_ranges;
use api::v1::ddl_request::{Expr as DdlExpr, Expr};
@@ -24,18 +22,16 @@ use api::v1::{
DeleteRequests, DropFlowExpr, InsertIntoPlan, InsertRequests, RowDeleteRequests,
RowInsertRequests,
};
use async_stream::try_stream;
use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use common_base::AffectedRows;
use common_error::ext::BoxedError;
use common_grpc::flight::do_put::DoPutResponse;
use common_grpc::FlightData;
use common_grpc::flight::FlightDecoder;
use common_query::Output;
use common_query::logical_plan::add_insert_to_logical_plan;
use common_telemetry::tracing::{self};
use datafusion::datasource::DefaultTableSource;
use futures::Stream;
use futures::stream::StreamExt;
use query::parser::PromQuery;
use servers::interceptor::{GrpcQueryInterceptor, GrpcQueryInterceptorRef};
use servers::query_handler::grpc::GrpcQueryHandler;
@@ -234,11 +230,6 @@ impl GrpcQueryHandler for Instance {
DdlExpr::DropView(_) => {
todo!("implemented in the following PR")
}
DdlExpr::CommentOn(expr) => {
self.statement_executor
.comment_by_expr(expr, ctx.clone())
.await?
}
}
}
};
@@ -249,8 +240,10 @@ impl GrpcQueryHandler for Instance {
async fn put_record_batch(
&self,
request: servers::grpc::flight::PutRecordBatchRequest,
table_name: &TableName,
table_ref: &mut Option<TableRef>,
decoder: &mut FlightDecoder,
data: FlightData,
ctx: QueryContextRef,
) -> Result<AffectedRows> {
let table = if let Some(table) = table_ref {
@@ -259,15 +252,15 @@ impl GrpcQueryHandler for Instance {
let table = self
.catalog_manager()
.table(
&request.table_name.catalog_name,
&request.table_name.schema_name,
&request.table_name.table_name,
&table_name.catalog_name,
&table_name.schema_name,
&table_name.table_name,
None,
)
.await
.context(CatalogSnafu)?
.with_context(|| TableNotFoundSnafu {
table_name: request.table_name.to_string(),
table_name: table_name.to_string(),
})?;
*table_ref = Some(table.clone());
table
@@ -286,77 +279,10 @@ impl GrpcQueryHandler for Instance {
// do we check limit for bulk insert?
self.inserter
.handle_bulk_insert(
table,
request.flight_data,
request.record_batch,
request.schema_bytes,
)
.handle_bulk_insert(table, decoder, data)
.await
.context(TableOperationSnafu)
}
fn handle_put_record_batch_stream(
&self,
mut stream: servers::grpc::flight::PutRecordBatchRequestStream,
ctx: QueryContextRef,
) -> Pin<Box<dyn Stream<Item = Result<DoPutResponse>> + Send>> {
// Resolve table once for the stream
// Clone all necessary data to make it 'static
let catalog_manager = self.catalog_manager().clone();
let plugins = self.plugins.clone();
let inserter = self.inserter.clone();
let table_name = stream.table_name().clone();
let ctx = ctx.clone();
Box::pin(try_stream! {
plugins
.get::<PermissionCheckerRef>()
.as_ref()
.check_permission(ctx.current_user(), PermissionReq::BulkInsert)
.context(PermissionSnafu)?;
// Cache for resolved table reference - resolve once and reuse
let table_ref = catalog_manager
.table(
&table_name.catalog_name,
&table_name.schema_name,
&table_name.table_name,
None,
)
.await
.context(CatalogSnafu)?
.with_context(|| TableNotFoundSnafu {
table_name: table_name.to_string(),
})?;
// Check permissions once for the stream
let interceptor_ref = plugins.get::<GrpcQueryInterceptorRef<Error>>();
let interceptor = interceptor_ref.as_ref();
interceptor.pre_bulk_insert(table_ref.clone(), ctx.clone())?;
// Process each request in the stream
while let Some(request_result) = stream.next().await {
let request = request_result.map_err(|e| {
let error_msg = format!("Stream error: {:?}", e);
IncompleteGrpcRequestSnafu { err_msg: error_msg }.build()
})?;
let request_id = request.request_id;
let start = Instant::now();
let rows = inserter
.handle_bulk_insert(
table_ref.clone(),
request.flight_data,
request.record_batch,
request.schema_bytes,
)
.await
.context(TableOperationSnafu)?;
let elapsed_secs = start.elapsed().as_secs_f64();
yield DoPutResponse::new(request_id, rows, elapsed_secs);
}
})
}
}
fn fill_catalog_and_schema_from_context(ddl_expr: &mut DdlExpr, ctx: &QueryContextRef) {
@@ -404,9 +330,6 @@ fn fill_catalog_and_schema_from_context(ddl_expr: &mut DdlExpr, ctx: &QueryConte
Expr::DropView(expr) => {
check_and_fill!(expr);
}
Expr::CommentOn(expr) => {
check_and_fill!(expr);
}
}
}

View File

@@ -65,7 +65,8 @@ impl JaegerQueryHandler for Instance {
// It's equivalent to `SELECT DISTINCT(service_name) FROM {db}.{trace_table}`.
Ok(query_trace_table(
ctx,
self,
self.catalog_manager(),
self.query_engine(),
vec![SelectExpr::from(col(SERVICE_NAME_COLUMN))],
vec![],
vec![],
@@ -106,7 +107,8 @@ impl JaegerQueryHandler for Instance {
// ```.
Ok(query_trace_table(
ctx,
self,
self.catalog_manager(),
self.query_engine(),
vec![
SelectExpr::from(col(SPAN_NAME_COLUMN)),
SelectExpr::from(col(SPAN_KIND_COLUMN)),
@@ -158,7 +160,8 @@ impl JaegerQueryHandler for Instance {
Ok(query_trace_table(
ctx,
self,
self.catalog_manager(),
self.query_engine(),
selects,
filters,
vec![col(TIMESTAMP_COLUMN).sort(false, false)], // Sort by timestamp in descending order.
@@ -217,7 +220,8 @@ impl JaegerQueryHandler for Instance {
// ```.
let output = query_trace_table(
ctx.clone(),
self,
self.catalog_manager(),
self.query_engine(),
vec![wildcard()],
filters,
vec![],
@@ -281,7 +285,8 @@ impl JaegerQueryHandler for Instance {
// query all spans
Ok(query_trace_table(
ctx,
self,
self.catalog_manager(),
self.query_engine(),
vec![wildcard()],
filters,
vec![col(TIMESTAMP_COLUMN).sort(false, false)], // Sort by timestamp in descending order.
@@ -298,7 +303,8 @@ impl JaegerQueryHandler for Instance {
#[allow(clippy::too_many_arguments)]
async fn query_trace_table(
ctx: QueryContextRef,
instance: &Instance,
catalog_manager: &CatalogManagerRef,
query_engine: &QueryEngineRef,
selects: Vec<SelectExpr>,
filters: Vec<Expr>,
sorts: Vec<SortExpr>,
@@ -328,8 +334,7 @@ async fn query_trace_table(
}
};
let table = instance
.catalog_manager()
let table = catalog_manager
.table(
ctx.current_catalog(),
&ctx.current_schema(),
@@ -362,7 +367,7 @@ async fn query_trace_table(
.map(|s| format!("\"{}\"", s))
.collect::<HashSet<String>>();
let df_context = create_df_context(instance.query_engine())?;
let df_context = create_df_context(query_engine)?;
let dataframe = df_context
.read_table(Arc::new(DfTableProviderAdapter::new(table)))

View File

@@ -136,7 +136,7 @@ impl Instance {
table_name: format_full_table_name(ctx.current_catalog(), &table_schema, &metric),
})?;
let scan_plan = dataframe.into_unoptimized_plan();
let scan_plan = dataframe.into_logical_plan();
let filter_conditions =
PromPlanner::matchers_to_expr(Matchers::new(matchers), scan_plan.schema())
.context(PrometheusLabelValuesQueryPlanSnafu)?;

View File

@@ -16,21 +16,16 @@ use std::net::SocketAddr;
use std::sync::Arc;
use auth::UserProviderRef;
use axum::extract::{Request, State};
use axum::middleware::Next;
use axum::response::IntoResponse;
use common_base::Plugins;
use common_config::Configurable;
use common_telemetry::info;
use meta_client::MetaClientOptions;
use servers::error::Error as ServerError;
use servers::grpc::builder::GrpcServerBuilder;
use servers::grpc::flight::FlightCraftRef;
use servers::grpc::frontend_grpc_handler::FrontendGrpcHandler;
use servers::grpc::greptime_handler::GreptimeRequestHandler;
use servers::grpc::{GrpcOptions, GrpcServer};
use servers::http::event::LogValidatorRef;
use servers::http::result::error_result::ErrorResponse;
use servers::http::utils::router::RouterConfigurator;
use servers::http::{HttpServer, HttpServerBuilder};
use servers::interceptor::LogIngestInterceptorRef;
@@ -43,7 +38,6 @@ use servers::query_handler::sql::ServerSqlQueryHandlerAdapter;
use servers::server::{Server, ServerHandlers};
use servers::tls::{ReloadableTlsServerConfig, maybe_watch_server_tls_config};
use snafu::ResultExt;
use tonic::Status;
use crate::error::{self, Result, StartServerSnafu, TomlFormatSnafu};
use crate::frontend::FrontendOptions;
@@ -58,7 +52,6 @@ where
grpc_server_builder: Option<GrpcServerBuilder>,
http_server_builder: Option<HttpServerBuilder>,
plugins: Plugins,
flight_handler: Option<FlightCraftRef>,
}
impl<T> Services<T>
@@ -72,7 +65,6 @@ where
grpc_server_builder: None,
http_server_builder: None,
plugins,
flight_handler: None,
}
}
@@ -130,16 +122,7 @@ where
builder = builder.with_extra_router(configurator.router());
}
builder.add_layer(axum::middleware::from_fn_with_state(
self.instance.clone(),
async move |State(state): State<Arc<Instance>>, request: Request, next: Next| {
if state.is_suspended() {
return ErrorResponse::from_error(servers::error::SuspendedSnafu.build())
.into_response();
}
next.run(request).await
},
))
builder
}
pub fn with_grpc_server_builder(self, builder: GrpcServerBuilder) -> Self {
@@ -156,13 +139,6 @@ where
}
}
pub fn with_flight_handler(self, flight_handler: FlightCraftRef) -> Self {
Self {
flight_handler: Some(flight_handler),
..self
}
}
fn build_grpc_server(
&mut self,
grpc: &GrpcOptions,
@@ -197,12 +173,6 @@ where
grpc.flight_compression,
);
// Use custom flight handler if provided, otherwise use the default GreptimeRequestHandler
let flight_handler = self
.flight_handler
.clone()
.unwrap_or_else(|| Arc::new(greptime_request_handler.clone()) as FlightCraftRef);
let grpc_server = builder
.name(name)
.database_handler(greptime_request_handler.clone())
@@ -211,17 +181,7 @@ where
self.instance.clone(),
user_provider.clone(),
))
.flight_handler(flight_handler)
.add_layer(axum::middleware::from_fn_with_state(
self.instance.clone(),
async move |State(state): State<Arc<Instance>>, request: Request, next: Next| {
if state.is_suspended() {
let status = Status::from(servers::error::SuspendedSnafu.build());
return status.into_http();
}
next.run(request).await
},
));
.flight_handler(Arc::new(greptime_request_handler));
let grpc_server = if !external {
let frontend_grpc_handler =

View File

@@ -36,7 +36,7 @@ const BLOOM_META_LEN_SIZE: u64 = 4;
pub const DEFAULT_PREFETCH_SIZE: u64 = 8192; // 8KiB
/// Metrics for bloom filter read operations.
#[derive(Default, Clone)]
#[derive(Debug, Default, Clone)]
pub struct BloomFilterReadMetrics {
/// Total byte size to read.
pub total_bytes: u64,
@@ -50,46 +50,6 @@ pub struct BloomFilterReadMetrics {
pub cache_miss: usize,
}
impl std::fmt::Debug for BloomFilterReadMetrics {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let Self {
total_bytes,
total_ranges,
fetch_elapsed,
cache_hit,
cache_miss,
} = self;
// If both total_bytes and cache_hit are 0, we didn't read anything.
if *total_bytes == 0 && *cache_hit == 0 {
return write!(f, "{{}}");
}
write!(f, "{{")?;
if *total_bytes > 0 {
write!(f, "\"total_bytes\":{}", total_bytes)?;
}
if *cache_hit > 0 {
if *total_bytes > 0 {
write!(f, ", ")?;
}
write!(f, "\"cache_hit\":{}", cache_hit)?;
}
if *total_ranges > 0 {
write!(f, ", \"total_ranges\":{}", total_ranges)?;
}
if !fetch_elapsed.is_zero() {
write!(f, ", \"fetch_elapsed\":\"{:?}\"", fetch_elapsed)?;
}
if *cache_miss > 0 {
write!(f, ", \"cache_miss\":{}", cache_miss)?;
}
write!(f, "}}")
}
}
impl BloomFilterReadMetrics {
/// Merges another metrics into this one.
pub fn merge_from(&mut self, other: &Self) {
@@ -158,7 +118,26 @@ pub trait BloomFilterReader: Sync {
&self,
ranges: &[Range<u64>],
metrics: Option<&mut BloomFilterReadMetrics>,
) -> Result<Vec<Bytes>>;
) -> Result<Vec<Bytes>> {
let start = metrics.as_ref().map(|_| Instant::now());
let mut results = Vec::with_capacity(ranges.len());
for range in ranges {
let size = (range.end - range.start) as u32;
let data = self.range_read(range.start, size, None).await?;
results.push(data);
}
if let Some(m) = metrics {
m.total_ranges += ranges.len();
m.total_bytes += ranges.iter().map(|r| r.end - r.start).sum::<u64>();
if let Some(start) = start {
m.fetch_elapsed += start.elapsed();
}
}
Ok(results)
}
/// Reads the meta information of the bloom filter.
async fn metadata(

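The merge_from method in the hunk above is shown without its body; presumably it folds one reading into an aggregate by summing the counters and accumulating the elapsed time. A minimal sketch of that accumulation pattern, using a stand-in ReadMetrics struct (with the same field names as the struct above, not the crate's actual BloomFilterReadMetrics):
use std::time::Duration;
#[derive(Debug, Default, Clone)]
struct ReadMetrics {
    total_bytes: u64,
    total_ranges: usize,
    fetch_elapsed: Duration,
    cache_hit: usize,
    cache_miss: usize,
}
impl ReadMetrics {
    // Fold another reading into this one: counters add up, elapsed times accumulate.
    fn merge_from(&mut self, other: &Self) {
        self.total_bytes += other.total_bytes;
        self.total_ranges += other.total_ranges;
        self.fetch_elapsed += other.fetch_elapsed;
        self.cache_hit += other.cache_hit;
        self.cache_miss += other.cache_miss;
    }
}
fn main() {
    let mut total = ReadMetrics::default();
    let per_range = ReadMetrics {
        total_bytes: 8192,
        total_ranges: 2,
        fetch_elapsed: Duration::from_millis(3),
        cache_hit: 1,
        cache_miss: 1,
    };
    total.merge_from(&per_range);
    println!("{total:?}");
}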
View File

@@ -31,7 +31,7 @@ mod blob;
mod footer;
/// Metrics for inverted index read operations.
#[derive(Default, Clone)]
#[derive(Debug, Default, Clone)]
pub struct InvertedIndexReadMetrics {
/// Total byte size to read.
pub total_bytes: u64,
@@ -45,46 +45,6 @@ pub struct InvertedIndexReadMetrics {
pub cache_miss: usize,
}
impl std::fmt::Debug for InvertedIndexReadMetrics {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let Self {
total_bytes,
total_ranges,
fetch_elapsed,
cache_hit,
cache_miss,
} = self;
// If both total_bytes and cache_hit are 0, we didn't read anything.
if *total_bytes == 0 && *cache_hit == 0 {
return write!(f, "{{}}");
}
write!(f, "{{")?;
if *total_bytes > 0 {
write!(f, "\"total_bytes\":{}", total_bytes)?;
}
if *cache_hit > 0 {
if *total_bytes > 0 {
write!(f, ", ")?;
}
write!(f, "\"cache_hit\":{}", cache_hit)?;
}
if *total_ranges > 0 {
write!(f, ", \"total_ranges\":{}", total_ranges)?;
}
if !fetch_elapsed.is_zero() {
write!(f, ", \"fetch_elapsed\":\"{:?}\"", fetch_elapsed)?;
}
if *cache_miss > 0 {
write!(f, ", \"cache_miss\":{}", cache_miss)?;
}
write!(f, "}}")
}
}
impl InvertedIndexReadMetrics {
/// Merges another metrics into this one.
pub fn merge_from(&mut self, other: &Self) {
@@ -113,7 +73,21 @@ pub trait InvertedIndexReader: Send + Sync {
&self,
ranges: &[Range<u64>],
metrics: Option<&'a mut InvertedIndexReadMetrics>,
) -> Result<Vec<Bytes>>;
) -> Result<Vec<Bytes>> {
let mut metrics = metrics;
let mut result = Vec::with_capacity(ranges.len());
for range in ranges {
let data = self
.range_read(
range.start,
(range.end - range.start) as u32,
metrics.as_deref_mut(),
)
.await?;
result.push(Bytes::from(data));
}
Ok(result)
}
/// Retrieves metadata of all inverted indices stored within the blob.
async fn metadata<'a>(

View File

@@ -16,7 +16,7 @@ use std::collections::HashMap;
use std::sync::Arc;
use common_wal::config::kafka::DatanodeKafkaConfig;
use common_wal::config::kafka::common::{DEFAULT_BACKOFF_CONFIG, DEFAULT_CONNECT_TIMEOUT};
use common_wal::config::kafka::common::DEFAULT_BACKOFF_CONFIG;
use dashmap::DashMap;
use rskafka::client::ClientBuilder;
use rskafka::client::partition::{Compression, PartitionClient, UnknownTopicHandling};
@@ -78,8 +78,7 @@ impl ClientManager {
) -> Result<Self> {
// Sets backoff config for the top-level kafka client and all clients constructed by it.
let mut builder = ClientBuilder::new(config.connection.broker_endpoints.clone())
.backoff_config(DEFAULT_BACKOFF_CONFIG)
.connect_timeout(Some(DEFAULT_CONNECT_TIMEOUT));
.backoff_config(DEFAULT_BACKOFF_CONFIG);
if let Some(sasl) = &config.connection.sasl {
builder = builder.sasl_config(sasl.config.clone().into_sasl_config());
};

View File

@@ -189,9 +189,6 @@ impl MetaClientBuilder {
let mgr = client.channel_manager.clone();
if self.enable_heartbeat {
if self.heartbeat_channel_manager.is_some() {
info!("Enable heartbeat channel using the heartbeat channel manager.");
}
let mgr = self.heartbeat_channel_manager.unwrap_or(mgr.clone());
client.heartbeat = Some(HeartbeatClient::new(
self.id,

View File

@@ -24,7 +24,7 @@ use common_meta::distributed_time_constants::META_KEEP_ALIVE_INTERVAL_SECS;
use common_telemetry::tracing_context::TracingContext;
use common_telemetry::warn;
use rand::seq::SliceRandom;
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use tokio::time::timeout;
use tonic::transport::Channel;
@@ -101,14 +101,12 @@ impl AskLeader {
};
let (tx, mut rx) = tokio::sync::mpsc::channel(peers.len());
let channel_manager = self.channel_manager.clone();
for addr in &peers {
let mut client = self.create_asker(addr)?;
let tx_clone = tx.clone();
let req = req.clone();
let addr = addr.clone();
let channel_manager = channel_manager.clone();
tokio::spawn(async move {
match client.ask_leader(req).await {
Ok(res) => {
@@ -119,19 +117,13 @@ impl AskLeader {
};
}
Err(status) => {
// Reset cached channel even on generic errors: the VIP may keep us on a dead
// backend, so forcing a reconnect gives us a chance to hit a healthy peer.
Self::reset_channels_with_manager(
&channel_manager,
std::slice::from_ref(&addr),
);
warn!("Failed to ask leader from: {addr}, {status}");
}
}
});
}
let leader = match timeout(
let leader = timeout(
self.channel_manager
.config()
.timeout
@@ -139,16 +131,8 @@ impl AskLeader {
rx.recv(),
)
.await
{
Ok(Some(leader)) => leader,
Ok(None) => return error::NoLeaderSnafu.fail(),
Err(e) => {
// All peers timed out. Reset channels to force reconnection,
// which may help escape dead backends in VIP/LB scenarios.
Self::reset_channels_with_manager(&self.channel_manager, &peers);
return Err(e).context(error::AskLeaderTimeoutSnafu);
}
};
.context(error::AskLeaderTimeoutSnafu)?
.context(error::NoLeaderSnafu)?;
let mut leadership_group = self.leadership_group.write().unwrap();
leadership_group.leader = Some(leader.clone());
@@ -185,15 +169,6 @@ impl AskLeader {
.context(error::CreateChannelSnafu)?,
))
}
/// Drop cached channels for the given peers so a fresh connection is used next time.
fn reset_channels_with_manager(channel_manager: &ChannelManager, peers: &[String]) {
if peers.is_empty() {
return;
}
channel_manager.retain_channel(|addr, _| !peers.iter().any(|peer| peer == addr));
}
}
#[async_trait]

Some files were not shown because too many files have changed in this diff.