Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00

Compare commits: 91 commits, comparing `v0.10.0-ni` ... `transform-`
| SHA1 |
|---|
| d4aa4159d4 |
| 960f6d821b |
| 9c5d044238 |
| be72d3bedb |
| 1ff29d8fde |
| 39ab1a6415 |
| 70c354eed6 |
| 23bf663d58 |
| 817648eac5 |
| 758ad0a8c5 |
| 8b60c27c2e |
| ea6df9ba49 |
| 69420793e2 |
| 0da112b335 |
| dcc08f6b3e |
| a34035a1f2 |
| fd8eba36a8 |
| 9712295177 |
| d275cdd570 |
| 83eb777d21 |
| 8ed5bc5305 |
| 9ded314905 |
| 702a55a235 |
| f3e5a5a7aa |
| 9c79baca4b |
| 03f2fa219d |
| 0ee455a980 |
| eab9e3a48d |
| 1008af5324 |
| 2485f66077 |
| 4f3afb13b6 |
| 32a0023010 |
| 4e9c251041 |
| e328c7067c |
| 8b307e4548 |
| ff38abde2e |
| aa9a265984 |
| 9d3ee6384a |
| fcde0a4874 |
| 5d42e63ab0 |
| 0c01532a37 |
| 6d503b047a |
| 5d28f7a912 |
| a50eea76a6 |
| 2ee1ce2ba1 |
| c02b5dae93 |
| 081c6d9e74 |
| ca6e02980e |
| 74bdba4613 |
| 2e0e82ddc8 |
| e0c4157ad8 |
| 613e07afb4 |
| 0ce93f0b88 |
| c231eee7c1 |
| 176f2df5b3 |
| 4622412dfe |
| 59ec90299b |
| 16b8cdc3d5 |
| 3197b8b535 |
| 972c2441af |
| bb8b54b5d3 |
| b5233e500b |
| b61a388d04 |
| 06e565d25a |
| 3b2ce31a19 |
| a889ea88ca |
| 2f2b4b306c |
| 856c0280f5 |
| aaa9b32908 |
| 4bb1f4f184 |
| 0f907ef99e |
| a61c0bd1d8 |
| 7dd0e3ab37 |
| d168bde226 |
| 4b34f610aa |
| 695ff1e037 |
| 288fdc3145 |
| a8ed3db0aa |
| 0dd11f53f5 |
| 19918928c5 |
| 5f0a83b2b1 |
| 71a66d15f7 |
| 2cdd103874 |
| 4dea4cac47 |
| 03b29439e2 |
| 712f4ca0ef |
| 60bacff57e |
| 6208772ba4 |
| 67184c0498 |
| 1dd908fdf7 |
| 8179b4798e |
@@ -40,7 +40,7 @@ runs:
   - name: Install PyArrow Package
     shell: pwsh
-    run: pip install pyarrow
+    run: pip install pyarrow numpy

   - name: Install WSL distribution
     uses: Vampire/setup-wsl@v2
@@ -18,7 +18,7 @@ runs:
   --set replicaCount=${{ inputs.etcd-replicas }} \
   --set resources.requests.cpu=50m \
   --set resources.requests.memory=128Mi \
-  --set resources.limits.cpu=1000m \
+  --set resources.limits.cpu=1500m \
   --set resources.limits.memory=2Gi \
   --set auth.rbac.create=false \
   --set auth.rbac.token.enabled=false \
.github/workflows/develop.yml (vendored, 10 changed lines)
@@ -436,7 +436,7 @@ jobs:
     timeout-minutes: 60
     strategy:
       matrix:
-        target: ["fuzz_migrate_mito_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
+        target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
        mode:
          - name: "Remote WAL"
            minio: true
@@ -449,6 +449,12 @@
            minio: true
            kafka: false
            values: "with-minio.yaml"
+        - target: "fuzz_migrate_metric_regions"
+          mode:
+            name: "Local WAL"
+            minio: true
+            kafka: false
+            values: "with-minio.yaml"
     steps:
       - name: Remove unused software
         run: |
@@ -688,7 +694,7 @@ jobs:
       with:
         python-version: '3.10'
     - name: Install PyArrow Package
-      run: pip install pyarrow
+      run: pip install pyarrow numpy
    - name: Setup etcd server
      working-directory: tests-integration/fixtures/etcd
      run: docker compose -f docker-compose-standalone.yml up -d --wait
.github/workflows/nightly-ci.yml (vendored, 2 changed lines)
@@ -92,7 +92,7 @@ jobs:
     with:
       python-version: "3.10"
   - name: Install PyArrow Package
-    run: pip install pyarrow
+    run: pip install pyarrow numpy
  - name: Install WSL distribution
    uses: Vampire/setup-wsl@v2
    with:
Cargo.lock (generated, 274 changed lines)
@@ -1,6 +1,6 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
-version = 3
+version = 4

 [[package]]
 name = "Inflector"
@@ -200,12 +200,6 @@ version = "1.0.89"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"

-[[package]]
-name = "anymap"
-version = "1.0.0-beta.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"
-
 [[package]]
 name = "anymap2"
 version = "0.13.0"
@@ -214,7 +208,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"

 [[package]]
 name = "api"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "common-base",
  "common-decimal",
@@ -230,6 +224,15 @@ dependencies = [
  "tonic-build",
 ]

+[[package]]
+name = "approx"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f2a05fd1bd10b2527e20a2cd32d8873d115b8b39fe219ee25f42a8aca6ba278"
+dependencies = [
+ "num-traits",
+]
+
 [[package]]
 name = "approx"
 version = "0.5.1"
@@ -766,7 +769,7 @@ dependencies = [

 [[package]]
 name = "auth"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "async-trait",
@@ -985,6 +988,7 @@ dependencies = [
  "num-bigint",
  "num-integer",
  "num-traits",
+ "serde",
 ]

 [[package]]
@@ -1375,7 +1379,7 @@ dependencies = [

 [[package]]
 name = "cache"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "catalog",
  "common-error",
@@ -1383,7 +1387,7 @@ dependencies = [
  "common-meta",
  "moka",
  "snafu 0.8.5",
- "substrait 0.9.3",
+ "substrait 0.9.5",
 ]

 [[package]]
@@ -1410,7 +1414,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

 [[package]]
 name = "catalog"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "arrow",
@@ -1548,6 +1552,16 @@ dependencies = [
  "vob",
 ]

+[[package]]
+name = "cgmath"
+version = "0.18.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a98d30140e3296250832bbaaff83b27dcd6fa3cc70fb6f1f3e5c9c0023b5317"
+dependencies = [
+ "approx 0.4.0",
+ "num-traits",
+]
+
 [[package]]
 name = "chrono"
 version = "0.4.38"
@@ -1739,7 +1753,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"

 [[package]]
 name = "client"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "arc-swap",
@@ -1769,12 +1783,11 @@ dependencies = [
  "serde_json",
  "snafu 0.8.5",
  "substrait 0.37.3",
- "substrait 0.9.3",
+ "substrait 0.9.5",
  "tokio",
  "tokio-stream",
  "tonic 0.11.0",
- "tracing",
  "tracing-subscriber",
 ]

 [[package]]
@@ -1788,6 +1801,17 @@ dependencies = [
  "winapi",
 ]

+[[package]]
+name = "clocksource"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "129026dd5a8a9592d96916258f3a5379589e513ea5e86aeb0bd2530286e44e9e"
+dependencies = [
+ "libc",
+ "time",
+ "winapi",
+]
+
 [[package]]
 name = "cmake"
 version = "0.1.51"
@@ -1799,7 +1823,7 @@ dependencies = [

 [[package]]
 name = "cmd"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "async-trait",
  "auth",
@@ -1856,7 +1880,7 @@ dependencies = [
  "similar-asserts",
  "snafu 0.8.5",
  "store-api",
- "substrait 0.9.3",
+ "substrait 0.9.5",
  "table",
  "temp-env",
  "tempfile",
@@ -1902,7 +1926,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"

 [[package]]
 name = "common-base"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "anymap2",
  "async-trait",
@@ -1920,7 +1944,7 @@ dependencies = [

 [[package]]
 name = "common-catalog"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "chrono",
  "common-error",
@@ -1931,7 +1955,7 @@ dependencies = [

 [[package]]
 name = "common-config"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "common-base",
  "common-error",
@@ -1954,7 +1978,7 @@ dependencies = [

 [[package]]
 name = "common-datasource"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "arrow",
  "arrow-schema",
@@ -1991,7 +2015,7 @@ dependencies = [

 [[package]]
 name = "common-decimal"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "bigdecimal 0.4.5",
  "common-error",
@@ -2004,7 +2028,7 @@ dependencies = [

 [[package]]
 name = "common-error"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "snafu 0.8.5",
  "strum 0.25.0",
@@ -2013,7 +2037,7 @@ dependencies = [

 [[package]]
 name = "common-frontend"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "async-trait",
@@ -2028,7 +2052,7 @@ dependencies = [

 [[package]]
 name = "common-function"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "arc-swap",
@@ -2054,6 +2078,7 @@ dependencies = [
  "once_cell",
  "paste",
  "ron",
+ "s2",
  "serde",
  "serde_json",
  "session",
@@ -2067,7 +2092,7 @@ dependencies = [

 [[package]]
 name = "common-greptimedb-telemetry"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "async-trait",
  "common-runtime",
@@ -2084,7 +2109,7 @@ dependencies = [

 [[package]]
 name = "common-grpc"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "arrow-flight",
@@ -2110,7 +2135,7 @@ dependencies = [

 [[package]]
 name = "common-grpc-expr"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "common-base",
@@ -2123,12 +2148,13 @@ dependencies = [
  "paste",
  "prost 0.12.6",
  "snafu 0.8.5",
  "store-api",
  "table",
 ]

 [[package]]
 name = "common-macro"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "arc-swap",
  "common-query",
@@ -2142,7 +2168,7 @@ dependencies = [

 [[package]]
 name = "common-mem-prof"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "common-error",
  "common-macro",
@@ -2155,7 +2181,7 @@ dependencies = [

 [[package]]
 name = "common-meta"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "anymap2",
  "api",
@@ -2212,11 +2238,23 @@ dependencies = [

 [[package]]
 name = "common-plugins"
-version = "0.9.3"
+version = "0.9.5"

+[[package]]
+name = "common-pprof"
+version = "0.9.5"
+dependencies = [
+ "common-error",
+ "common-macro",
+ "pprof",
+ "prost 0.12.6",
+ "snafu 0.8.5",
+ "tokio",
+]
+
 [[package]]
 name = "common-procedure"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -2243,7 +2281,7 @@ dependencies = [

 [[package]]
 name = "common-procedure-test"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "async-trait",
  "common-procedure",
@@ -2251,7 +2289,7 @@ dependencies = [

 [[package]]
 name = "common-query"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "async-trait",
@@ -2277,7 +2315,7 @@ dependencies = [

 [[package]]
 name = "common-recordbatch"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "arc-swap",
  "common-error",
@@ -2296,19 +2334,27 @@ dependencies = [

 [[package]]
 name = "common-runtime"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "async-trait",
  "clap 4.5.19",
  "common-error",
  "common-macro",
  "common-telemetry",
  "futures",
  "lazy_static",
  "num_cpus",
  "once_cell",
  "parking_lot 0.12.3",
  "paste",
  "pin-project",
  "prometheus",
  "rand",
+ "ratelimit",
  "serde",
  "serde_json",
  "snafu 0.8.5",
  "tempfile",
  "tokio",
  "tokio-metrics",
  "tokio-metrics-collector",
@@ -2318,7 +2364,7 @@ dependencies = [

 [[package]]
 name = "common-telemetry"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "atty",
  "backtrace",
@@ -2346,7 +2392,7 @@ dependencies = [

 [[package]]
 name = "common-test-util"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "client",
  "common-query",
@@ -2358,7 +2404,7 @@ dependencies = [

 [[package]]
 name = "common-time"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "arrow",
  "chrono",
@@ -2374,7 +2420,7 @@ dependencies = [

 [[package]]
 name = "common-version"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "build-data",
  "const_format",
@@ -2385,7 +2431,7 @@ dependencies = [

 [[package]]
 name = "common-wal"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "common-base",
  "common-error",
@@ -3194,7 +3240,7 @@ dependencies = [

 [[package]]
 name = "datanode"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "arrow-flight",
@@ -3244,7 +3290,7 @@ dependencies = [
  "session",
  "snafu 0.8.5",
  "store-api",
- "substrait 0.9.3",
+ "substrait 0.9.5",
  "table",
  "tokio",
  "toml 0.8.19",
@@ -3253,7 +3299,7 @@ dependencies = [

 [[package]]
 name = "datatypes"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "arrow",
  "arrow-array",
@@ -3859,7 +3905,7 @@ dependencies = [

 [[package]]
 name = "file-engine"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "async-trait",
@@ -3959,9 +4005,18 @@ version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "28a80e3145d8ad11ba0995949bbcf48b9df2be62772b3d351ef017dff6ecb853"

+[[package]]
+name = "float_extras"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b22b70f8649ea2315955f1a36d964b0e4da482dfaa5f0d04df0d1fb7c338ab7a"
+dependencies = [
+ "libc",
+]
+
 [[package]]
 name = "flow"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "arrow",
@@ -4018,7 +4073,7 @@ dependencies = [
  "snafu 0.8.5",
  "store-api",
  "strum 0.25.0",
- "substrait 0.9.3",
+ "substrait 0.9.5",
  "table",
  "tokio",
  "tonic 0.11.0",
@@ -4080,7 +4135,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"

 [[package]]
 name = "frontend"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "arc-swap",
@@ -4389,7 +4444,7 @@ version = "0.7.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9ff16065e5720f376fbced200a5ae0f47ace85fd70b7e54269790281353b6d61"
 dependencies = [
- "approx",
+ "approx 0.5.1",
  "num-traits",
  "serde",
 ]
@@ -4476,7 +4531,7 @@ dependencies = [
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=0b4f7c8ab06399f6b90e1626e8d5b9697cb33bb9#0b4f7c8ab06399f6b90e1626e8d5b9697cb33bb9"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=255f87a3318ace3f88a67f76995a0e14910983f4#255f87a3318ace3f88a67f76995a0e14910983f4"
 dependencies = [
  "prost 0.12.6",
  "serde",
@@ -5128,7 +5183,7 @@ dependencies = [

 [[package]]
 name = "index"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "async-trait",
  "asynchronous-codec",
@@ -5208,7 +5263,7 @@ dependencies = [
 [[package]]
 name = "influxdb_line_protocol"
 version = "0.1.0"
-source = "git+https://github.com/evenyag/influxdb_iox?branch=feat/line-protocol#10ef0d0b02705ac7518717390939fa3a9bcfcacc"
+source = "git+https://github.com/evenyag/influxdb_iox?branch=feat%2Fline-protocol#10ef0d0b02705ac7518717390939fa3a9bcfcacc"
 dependencies = [
  "bytes",
  "nom",
@@ -5469,7 +5524,7 @@ dependencies = [
 [[package]]
 name = "jsonb"
 version = "0.4.1"
-source = "git+https://github.com/datafuselabs/jsonb.git?rev=46ad50fc71cf75afbf98eec455f7892a6387c1fc#46ad50fc71cf75afbf98eec455f7892a6387c1fc"
+source = "git+https://github.com/databendlabs/jsonb.git?rev=46ad50fc71cf75afbf98eec455f7892a6387c1fc#46ad50fc71cf75afbf98eec455f7892a6387c1fc"
 dependencies = [
  "byteorder",
  "fast-float",
@@ -5959,7 +6014,7 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"

 [[package]]
 name = "log-store"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -6279,7 +6334,7 @@ dependencies = [

 [[package]]
 name = "meta-client"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "async-trait",
@@ -6305,7 +6360,7 @@ dependencies = [

 [[package]]
 name = "meta-srv"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "async-trait",
@@ -6366,9 +6421,9 @@ dependencies = [
 [[package]]
 name = "meter-core"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd#80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd"
+source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=a10facb353b41460eeb98578868ebf19c2084fac#a10facb353b41460eeb98578868ebf19c2084fac"
 dependencies = [
- "anymap",
+ "anymap2",
  "once_cell",
  "parking_lot 0.12.3",
 ]
@@ -6376,14 +6431,14 @@ dependencies = [
 [[package]]
 name = "meter-macros"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd#80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd"
+source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=a10facb353b41460eeb98578868ebf19c2084fac#a10facb353b41460eeb98578868ebf19c2084fac"
 dependencies = [
  "meter-core",
 ]

 [[package]]
 name = "metric-engine"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "aquamarine",
@@ -6486,7 +6541,7 @@ dependencies = [

 [[package]]
 name = "mito2"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "aquamarine",
@@ -6871,7 +6926,7 @@ version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d506eb7e08d6329505faa8a3a00a5dcc6de9f76e0c77e4b75763ae3c770831ff"
 dependencies = [
- "approx",
+ "approx 0.5.1",
  "matrixmultiply",
  "nalgebra-macros",
  "num-complex",
@@ -7222,7 +7277,7 @@ dependencies = [

 [[package]]
 name = "object-store"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "anyhow",
  "bytes",
@@ -7507,12 +7562,13 @@ dependencies = [
  "ordered-float 4.3.0",
  "percent-encoding",
  "rand",
  "serde_json",
  "thiserror",
 ]

 [[package]]
 name = "operator"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "async-trait",
@@ -7557,7 +7613,7 @@ dependencies = [
  "sql",
  "sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
  "store-api",
- "substrait 0.9.3",
+ "substrait 0.9.5",
  "table",
  "tokio",
  "tokio-util",
@@ -7807,7 +7863,7 @@ dependencies = [

 [[package]]
 name = "partition"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "async-trait",
@@ -8108,7 +8164,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"

 [[package]]
 name = "pipeline"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -8141,6 +8197,7 @@ dependencies = [
  "futures",
  "greptime-proto",
  "itertools 0.10.5",
  "jsonb",
  "lazy_static",
  "moka",
  "once_cell",
@@ -8269,13 +8326,14 @@ dependencies = [

 [[package]]
 name = "plugins"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "auth",
  "common-base",
  "datanode",
  "frontend",
  "meta-srv",
  "serde",
  "snafu 0.8.5",
 ]

@@ -8543,7 +8601,7 @@ dependencies = [

 [[package]]
 name = "promql"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "ahash 0.8.11",
  "async-trait",
@@ -8569,15 +8627,18 @@ dependencies = [

 [[package]]
 name = "promql-parser"
-version = "0.4.0"
+version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "007a331efb31f6ddb644590ef22359c9469784931162aad92599e34bcfa66583"
+checksum = "7fe99e6f80a79abccf1e8fb48dd63473a36057e600cc6ea36147c8318698ae6f"
 dependencies = [
  "cfgrammar",
  "chrono",
  "lazy_static",
  "lrlex",
  "lrpar",
  "regex",
  "serde",
  "serde_json",
 ]

 [[package]]
@@ -8778,7 +8839,7 @@ dependencies = [

 [[package]]
 name = "puffin"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "async-compression 0.4.13",
  "async-trait",
@@ -8900,7 +8961,7 @@ dependencies = [

 [[package]]
 name = "query"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -8935,6 +8996,7 @@ dependencies = [
  "datafusion-physical-expr",
  "datafusion-sql",
  "datatypes",
  "fastrand",
  "format_num",
  "futures",
  "futures-util",
@@ -8949,12 +9011,15 @@ dependencies = [
  "object-store",
  "once_cell",
  "paste",
  "pretty_assertions",
  "prometheus",
  "promql",
  "promql-parser",
  "prost 0.12.6",
  "rand",
  "regex",
  "serde",
  "serde_json",
  "session",
  "snafu 0.8.5",
  "sql",
@@ -8963,10 +9028,11 @@ dependencies = [
  "stats-cli",
  "store-api",
  "streaming-stats",
- "substrait 0.9.3",
+ "substrait 0.9.5",
  "table",
  "tokio",
  "tokio-stream",
  "uuid",
 ]

 [[package]]
@@ -9146,6 +9212,17 @@ dependencies = [
  "rand",
 ]

+[[package]]
+name = "ratelimit"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c1bb13e2dcfa2232ac6887157aad8d9b3fe4ca57f7c8d4938ff5ea9be742300"
+dependencies = [
+ "clocksource",
+ "parking_lot 0.12.3",
+ "thiserror",
+]
+
 [[package]]
 name = "raw-cpuid"
 version = "11.2.0"
@@ -10261,6 +10338,20 @@ version = "1.0.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"

+[[package]]
+name = "s2"
+version = "0.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc7fbc04bb52c40b5f48c9bb2d2961375301916e0c25d9f373750654d588cd5c"
+dependencies = [
+ "bigdecimal 0.3.1",
+ "cgmath",
+ "float_extras",
+ "lazy_static",
+ "libm",
+ "serde",
+]
+
 [[package]]
 name = "safe-proc-macro2"
 version = "1.0.67"
@@ -10383,7 +10474,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

 [[package]]
 name = "script"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "arc-swap",
@@ -10677,8 +10768,9 @@ dependencies = [

 [[package]]
 name = "servers"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "ahash 0.8.11",
  "aide",
  "api",
  "arrow",
@@ -10704,6 +10796,7 @@ dependencies = [
  "common-mem-prof",
  "common-meta",
  "common-plugins",
+ "common-pprof",
  "common-query",
  "common-recordbatch",
  "common-runtime",
@@ -10786,7 +10879,7 @@ dependencies = [

 [[package]]
 name = "session"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "arc-swap",
@@ -10847,9 +10940,9 @@ dependencies = [

 [[package]]
 name = "shadow-rs"
-version = "0.31.1"
+version = "0.35.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02c282402d25101f9c893e9cd7e4cae535fe7db18b81291de973026c219ddf1e"
+checksum = "2311e39772c00391875f40e34d43efef247b23930143a70ca5fbec9505937420"
 dependencies = [
  "const_format",
  "git2",
@@ -10898,7 +10991,7 @@ version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f0b7840f121a46d63066ee7a99fc81dcabbc6105e437cae43528cea199b5a05f"
 dependencies = [
- "approx",
+ "approx 0.5.1",
  "num-complex",
  "num-traits",
  "paste",
@@ -11107,7 +11200,7 @@ dependencies = [

 [[package]]
 name = "sql"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "chrono",
@@ -11168,7 +11261,7 @@ dependencies = [

 [[package]]
 name = "sqlness-runner"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "async-trait",
  "clap 4.5.19",
@@ -11369,7 +11462,7 @@ version = "0.16.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b35a062dbadac17a42e0fc64c27f419b25d6fae98572eb43c8814c9e873d7721"
 dependencies = [
- "approx",
+ "approx 0.5.1",
  "lazy_static",
  "nalgebra",
  "num-traits",
@@ -11388,7 +11481,7 @@ dependencies = [

 [[package]]
 name = "store-api"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "aquamarine",
@@ -11406,6 +11499,7 @@ dependencies = [
  "datatypes",
  "derive_builder 0.12.0",
  "futures",
  "humantime",
  "serde",
  "serde_json",
  "snafu 0.8.5",
@@ -11557,7 +11651,7 @@ dependencies = [

 [[package]]
 name = "substrait"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "async-trait",
  "bytes",
@@ -11756,7 +11850,7 @@ dependencies = [

 [[package]]
 name = "table"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "async-trait",
@@ -12022,7 +12116,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"

 [[package]]
 name = "tests-fuzz"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "arbitrary",
  "async-trait",
@@ -12064,7 +12158,7 @@ dependencies = [

 [[package]]
 name = "tests-integration"
-version = "0.9.3"
+version = "0.9.5"
 dependencies = [
  "api",
  "arrow-flight",
@@ -12126,7 +12220,7 @@ dependencies = [
  "sql",
  "sqlx",
  "store-api",
- "substrait 0.9.3",
+ "substrait 0.9.5",
  "table",
  "tempfile",
  "time",
Cargo.toml (24 changed lines)
@@ -20,6 +20,7 @@ members = [
     "src/common/mem-prof",
     "src/common/meta",
     "src/common/plugins",
+    "src/common/pprof",
     "src/common/procedure",
     "src/common/procedure-test",
     "src/common/query",
@@ -64,7 +65,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.9.3"
+version = "0.9.5"
 edition = "2021"
 license = "Apache-2.0"
@@ -120,13 +121,13 @@ etcd-client = { version = "0.13" }
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0b4f7c8ab06399f6b90e1626e8d5b9697cb33bb9" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "255f87a3318ace3f88a67f76995a0e14910983f4" }
 humantime = "2.1"
 humantime-serde = "1.1"
 itertools = "0.10"
-jsonb = { git = "https://github.com/datafuselabs/jsonb.git", rev = "46ad50fc71cf75afbf98eec455f7892a6387c1fc", default-features = false }
+jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "46ad50fc71cf75afbf98eec455f7892a6387c1fc", default-features = false }
 lazy_static = "1.4"
-meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd" }
+meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
 mockall = "0.11.4"
 moka = "0.12"
 notify = "6.1"
@@ -137,15 +138,18 @@ opentelemetry-proto = { version = "0.5", features = [
     "metrics",
     "trace",
     "with-serde",
+    "logs",
 ] }
 parking_lot = "0.12"
 parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
 paste = "1.0"
 pin-project = "1.0"
 prometheus = { version = "0.13.3", features = ["process"] }
-promql-parser = { version = "0.4" }
+promql-parser = { version = "0.4.3", features = ["ser"] }
 prost = "0.12"
 raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.8"
+ratelimit = "0.9"
 regex = "1.8"
 regex-automata = { version = "0.4" }
 reqwest = { version = "0.12", default-features = false, features = [
@@ -165,7 +169,7 @@ schemars = "0.8"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = { version = "1.0", features = ["float_roundtrip"] }
 serde_with = "3"
-shadow-rs = "0.31"
+shadow-rs = "0.35"
 similar-asserts = "1.6.0"
 smallvec = { version = "1", features = ["serde"] }
 snafu = "0.8"
@@ -176,13 +180,16 @@ sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "5
 ] }
 strum = { version = "0.25", features = ["derive"] }
 tempfile = "3"
-tokio = { version = "1.36", features = ["full"] }
+tokio = { version = "1.40", features = ["full"] }
 tokio-postgres = "0.7"
 tokio-stream = { version = "0.1" }
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
 toml = "0.8.8"
 tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
 tower = { version = "0.4" }
 tracing-appender = "0.2"
 tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
 typetag = "0.2"
 uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
 zstd = "0.13"
@@ -208,6 +215,7 @@ common-macro = { path = "src/common/macro" }
 common-mem-prof = { path = "src/common/mem-prof" }
 common-meta = { path = "src/common/meta" }
 common-plugins = { path = "src/common/plugins" }
+common-pprof = { path = "src/common/pprof" }
 common-procedure = { path = "src/common/procedure" }
 common-procedure-test = { path = "src/common/procedure-test" }
 common-query = { path = "src/common/query" }
@@ -256,7 +264,7 @@ tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }

 [workspace.dependencies.meter-macros]
 git = "https://github.com/GreptimeTeam/greptime-meter.git"
-rev = "80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd"
+rev = "a10facb353b41460eeb98578868ebf19c2084fac"

 [profile.release]
 debug = 1
Makefile (2 changed lines)
@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
 IMAGE_REGISTRY ?= docker.io
 IMAGE_NAMESPACE ?= greptime
 IMAGE_TAG ?= latest
-DEV_BUILDER_IMAGE_TAG ?= 2024-06-06-5674c14f-20240920110415
+DEV_BUILDER_IMAGE_TAG ?= 2024-10-19-a5c00e85-20241024184445
 BUILDX_MULTI_PLATFORM_BUILD ?= false
 BUILDX_BUILDER_NAME ?= gtbuilder
 BASE_IMAGE ?= ubuntu
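Because these Makefile variables are declared with `?=`, they are only defaults and can be overridden per invocation. A sketch of a one-off build against the newly pinned dev-builder image; the `build-by-dev-builder` target name is an assumption about this Makefile's layout, so treat it as illustrative:

```bash
# Override the dev-builder image tag for a single build without
# editing the Makefile (variables set on the command line win over ?=).
make build-by-dev-builder DEV_BUILDER_IMAGE_TAG=2024-10-19-a5c00e85-20241024184445
```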
@@ -83,6 +83,7 @@
 | `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
 | `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
 | `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
+| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
 | `metadata_store` | -- | -- | Metadata storage options. |
 | `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
 | `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
@@ -115,7 +116,9 @@
 | `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
 | `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
 | `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
-| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
+| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
+| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
+| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
 | `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
 | `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
 | `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`. |
@@ -409,6 +412,7 @@
 | `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
 | `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
 | `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
+| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
 | `storage` | -- | -- | The data storage options. |
 | `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
@@ -435,7 +439,9 @@
 | `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
 | `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
 | `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
-| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
+| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
+| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
+| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
 | `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
 | `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
 | `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
@@ -213,6 +213,17 @@ create_index = true
 ## **It's only used when the provider is `kafka`**.
 dump_index_interval = "60s"

+## Ignore missing entries during read WAL.
+## **It's only used when the provider is `kafka`**.
+##
+## This option ensures that when Kafka messages are deleted, the system
+## can still successfully replay memtable data without throwing an
+## out-of-range error.
+## However, enabling this option might lead to unexpected data loss,
+## as the system will skip over missing entries instead of treating
+## them as critical errors.
+overwrite_entry_start_id = false
+
 # The Kafka SASL configuration.
 # **It's only used when the provider is `kafka`**.
 # Available SASL mechanisms:
@@ -405,8 +416,17 @@ manifest_checkpoint_distance = 10
 ## Whether to compress manifest and checkpoint file by gzip (default false).
 compress_manifest = false

-## Max number of running background jobs
-max_background_jobs = 4
+## Max number of running background flush jobs (default: 1/2 of cpu cores).
+## @toml2docs:none-default="Auto"
+#+ max_background_flushes = 4
+
+## Max number of running background compaction jobs (default: 1/4 of cpu cores).
+## @toml2docs:none-default="Auto"
+#+ max_background_compactions = 2
+
+## Max number of running background purge jobs (default: number of cpu cores).
+## @toml2docs:none-default="Auto"
+#+ max_background_purges = 8

 ## Interval to auto flush a region if it has not flushed yet.
 auto_flush_interval = "1h"
@@ -626,7 +646,7 @@ url = ""
 headers = { }

 ## The tracing options. Only effect when compiled with `tokio-console` feature.
-[tracing]
+#+ [tracing]
 ## The tokio console address.
 ## @toml2docs:none-default
-tokio_console_addr = "127.0.0.1"
+#+ tokio_console_addr = "127.0.0.1"
@@ -101,8 +101,8 @@ threshold = "10s"
 sample_ratio = 1.0

 ## The tracing options. Only effect when compiled with `tokio-console` feature.
-[tracing]
+#+ [tracing]
 ## The tokio console address.
 ## @toml2docs:none-default
-tokio_console_addr = "127.0.0.1"
+#+ tokio_console_addr = "127.0.0.1"
@@ -231,7 +231,7 @@ url = ""
 headers = { }

 ## The tracing options. Only effect when compiled with `tokio-console` feature.
-[tracing]
+#+ [tracing]
 ## The tokio console address.
 ## @toml2docs:none-default
-tokio_console_addr = "127.0.0.1"
+#+ tokio_console_addr = "127.0.0.1"
@@ -218,7 +218,7 @@ url = ""
 headers = { }

 ## The tracing options. Only effect when compiled with `tokio-console` feature.
-[tracing]
+#+ [tracing]
 ## The tokio console address.
 ## @toml2docs:none-default
-tokio_console_addr = "127.0.0.1"
+#+ tokio_console_addr = "127.0.0.1"
@@ -237,6 +237,17 @@ backoff_base = 2
 ## **It's only used when the provider is `kafka`**.
 backoff_deadline = "5mins"

+## Ignore missing entries during read WAL.
+## **It's only used when the provider is `kafka`**.
+##
+## This option ensures that when Kafka messages are deleted, the system
+## can still successfully replay memtable data without throwing an
+## out-of-range error.
+## However, enabling this option might lead to unexpected data loss,
+## as the system will skip over missing entries instead of treating
+## them as critical errors.
+overwrite_entry_start_id = false
+
 # The Kafka SASL configuration.
 # **It's only used when the provider is `kafka`**.
 # Available SASL mechanisms:
@@ -443,8 +454,17 @@ manifest_checkpoint_distance = 10
 ## Whether to compress manifest and checkpoint file by gzip (default false).
 compress_manifest = false

-## Max number of running background jobs
-max_background_jobs = 4
+## Max number of running background flush jobs (default: 1/2 of cpu cores).
+## @toml2docs:none-default="Auto"
+#+ max_background_flushes = 4
+
+## Max number of running background compaction jobs (default: 1/4 of cpu cores).
+## @toml2docs:none-default="Auto"
+#+ max_background_compactions = 2
+
+## Max number of running background purge jobs (default: number of cpu cores).
+## @toml2docs:none-default="Auto"
+#+ max_background_purges = 8

 ## Interval to auto flush a region if it has not flushed yet.
 auto_flush_interval = "1h"
@@ -670,7 +690,7 @@ url = ""
 headers = { }

 ## The tracing options. Only effect when compiled with `tokio-console` feature.
-[tracing]
+#+ [tracing]
 ## The tokio console address.
 ## @toml2docs:none-default
-tokio_console_addr = "127.0.0.1"
+#+ tokio_console_addr = "127.0.0.1"
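These example files are only templates; to try the options introduced above (for instance `overwrite_entry_start_id` or the split background-job limits), edit a copy and point the binary at it. A sketch, reusing the `greptime standalone start` invocation that appears in the profiling docs below; the copied config path is an arbitrary choice:

```bash
# Start a standalone instance with an edited copy of the example config
# so the new WAL and background-job settings take effect.
cp config/standalone.example.toml /tmp/my-config.toml
./greptime standalone start -c /tmp/my-config.toml
```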
@@ -48,4 +48,4 @@ Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [que

 ## Addition
 - You can tune GreptimeDB's configuration to get better performance.
-- You can setup GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/operations/configuration/#storage-options).
+- You can setup GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/deployments/configuration#storage-options).
docs/how-to/how-to-change-log-level-on-the-fly.md (new file, 16 lines)
@@ -0,0 +1,16 @@
+# Change Log Level on the Fly
+
+## HTTP API
+
+Example:
+```bash
+curl --data "trace;flow=debug" 127.0.0.1:4000/debug/log_level
+```
+And the database will reply with something like:
+```bash
+Log Level changed from Some("info") to "trace;flow=debug"%
+```
+
+The data is a string in the format `global_level;module1=level1;module2=level2;...`, following the same rules as `RUST_LOG`.
+
+The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off` (case insensitive).
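For instance, to keep the global level at `info` while turning on debug logs for a single subsystem, a request might look like the sketch below. The `mito2` module name is only an illustration; any module path accepted by `RUST_LOG` works:

```bash
# Global level stays at info; only the mito2 storage engine logs at debug.
curl --data "info;mito2=debug" 127.0.0.1:4000/debug/log_level
```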
@@ -1,15 +1,9 @@
 # Profiling CPU

-## Build GreptimeDB with `pprof` feature
-
-```bash
-cargo build --features=pprof
-```
-
 ## HTTP API
 Sample at 99 Hertz, for 5 seconds, output report in [protobuf format](https://github.com/google/pprof/blob/master/proto/profile.proto).
 ```bash
-curl -s '0:4000/v1/prof/cpu' > /tmp/pprof.out
+curl -s '0:4000/debug/prof/cpu' > /tmp/pprof.out
 ```

 Then you can use `pprof` command with the protobuf file.
@@ -19,10 +13,10 @@ go tool pprof -top /tmp/pprof.out

 Sample at 99 Hertz, for 60 seconds, output report in flamegraph format.
 ```bash
-curl -s '0:4000/v1/prof/cpu?seconds=60&output=flamegraph' > /tmp/pprof.svg
+curl -s '0:4000/debug/prof/cpu?seconds=60&output=flamegraph' > /tmp/pprof.svg
 ```

 Sample at 49 Hertz, for 10 seconds, output report in text format.
 ```bash
-curl -s '0:4000/v1/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt
+curl -s '0:4000/debug/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt
 ```
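Beyond `go tool pprof -top`, the captured protobuf profile can also be explored interactively in pprof's web UI. A quick sketch, assuming the Go toolchain is installed and port 8080 is free:

```bash
# Open an interactive web UI (flame graph, top, and source views)
# for the profile captured above.
go tool pprof -http=:8080 /tmp/pprof.out
```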
@@ -12,16 +12,10 @@ brew install jemalloc
 sudo apt install libjemalloc-dev
 ```

 ### [flamegraph](https://github.com/brendangregg/FlameGraph)

 ```bash
-curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl > ./flamegraph.pl
-```
-
-### Build GreptimeDB with `mem-prof` feature.
-
-```bash
-cargo build --features=mem-prof
+curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl > ./flamegraph.pl
 ```
@@ -35,7 +29,7 @@ MALLOC_CONF=prof:true,lg_prof_interval:28 ./target/debug/greptime standalone sta
 Dump memory profiling data through HTTP API:

 ```bash
-curl localhost:4000/v1/prof/mem > greptime.hprof
+curl localhost:4000/debug/prof/mem > greptime.hprof
 ```

 You can periodically dump profiling data and compare them to find the delta memory usage.
@@ -45,6 +39,9 @@ You can periodically dump profiling data and compare them to find the delta memo

 To create flamegraph according to dumped profiling data:

 ```bash
-jeprof --svg <path_to_greptimedb_binary> --base=<baseline_prof> <profile_data> > output.svg
+sudo apt install -y libjemalloc-dev
+
+jeprof <path_to_greptime_binary> <profile_data> --collapse | ./flamegraph.pl > mem-prof.svg
+
+jeprof <path_to_greptime_binary> --base <baseline_prof> <profile_data> --collapse | ./flamegraph.pl > output.svg
 ```
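To act on the advice about comparing periodic dumps, a small loop can snapshot the heap profile at a fixed interval. This is only a sketch: the directory, interval, and port are arbitrary choices, and the endpoint is the `/debug/prof/mem` API shown above:

```bash
# Dump a heap profile every 60 seconds; consecutive dumps can then be
# diffed with `jeprof --base` to spot memory growth over time.
mkdir -p /tmp/greptime-heap
while true; do
  curl -s localhost:4000/debug/prof/mem > "/tmp/greptime-heap/$(date +%s).hprof"
  sleep 60
done
```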
@@ -409,7 +409,39 @@
       "fieldConfig": {
         "defaults": {
           "color": {
-            "mode": "thresholds"
+            "mode": "palette-classic"
           },
+          "custom": {
+            "axisBorderShow": false,
+            "axisCenteredZero": false,
+            "axisColorMode": "text",
+            "axisLabel": "",
+            "axisPlacement": "auto",
+            "barAlignment": 0,
+            "drawStyle": "line",
+            "fillOpacity": 0,
+            "gradientMode": "none",
+            "hideFrom": {
+              "legend": false,
+              "tooltip": false,
+              "viz": false
+            },
+            "insertNulls": false,
+            "lineInterpolation": "linear",
+            "lineWidth": 1,
+            "pointSize": 5,
+            "scaleDistribution": {
+              "type": "linear"
+            },
+            "showPoints": "auto",
+            "spanNulls": false,
+            "stacking": {
+              "group": "A",
+              "mode": "none"
+            },
+            "thresholdsStyle": {
+              "mode": "off"
+            }
+          },
           "fieldMinMax": false,
           "mappings": [],
@@ -438,18 +470,16 @@
       },
       "id": 27,
       "options": {
-        "colorMode": "value",
-        "graphMode": "area",
-        "justifyMode": "auto",
-        "orientation": "auto",
-        "reduceOptions": {
-          "calcs": ["lastNotNull"],
-          "fields": "",
-          "values": false
+        "legend": {
+          "calcs": [],
+          "displayMode": "list",
+          "placement": "bottom",
+          "showLegend": true
         },
-        "text": {},
-        "textMode": "auto",
-        "wideLayout": true
+        "tooltip": {
+          "mode": "single",
+          "sort": "none"
+        }
       },
       "pluginVersion": "10.2.3",
       "targets": [
@@ -467,7 +497,7 @@
         }
       ],
       "title": "CPU",
-      "type": "stat"
+      "type": "timeseries"
     },
     {
       "datasource": {
@@ -477,7 +507,39 @@
       "fieldConfig": {
         "defaults": {
           "color": {
-            "mode": "thresholds"
+            "mode": "palette-classic"
           },
+          "custom": {
+            "axisBorderShow": false,
+            "axisCenteredZero": false,
+            "axisColorMode": "text",
+            "axisLabel": "",
+            "axisPlacement": "auto",
+            "barAlignment": 0,
+            "drawStyle": "line",
+            "fillOpacity": 0,
+            "gradientMode": "none",
+            "hideFrom": {
+              "legend": false,
+              "tooltip": false,
+              "viz": false
+            },
+            "insertNulls": false,
+            "lineInterpolation": "linear",
+            "lineWidth": 1,
+            "pointSize": 5,
+            "scaleDistribution": {
+              "type": "linear"
+            },
+            "showPoints": "auto",
+            "spanNulls": false,
+            "stacking": {
+              "group": "A",
+              "mode": "none"
+            },
+            "thresholdsStyle": {
+              "mode": "off"
+            }
+          },
           "decimals": 0,
           "fieldMinMax": false,
@@ -503,18 +565,16 @@
       },
       "id": 28,
       "options": {
-        "colorMode": "value",
-        "graphMode": "area",
-        "justifyMode": "auto",
-        "orientation": "auto",
-        "reduceOptions": {
-          "calcs": ["lastNotNull"],
-          "fields": "",
-          "values": false
+        "legend": {
+          "calcs": [],
+          "displayMode": "list",
+          "placement": "bottom",
+          "showLegend": true
         },
-        "text": {},
-        "textMode": "auto",
-        "wideLayout": true
+        "tooltip": {
+          "mode": "single",
+          "sort": "none"
+        }
       },
       "pluginVersion": "10.2.3",
       "targets": [
@@ -532,7 +592,7 @@
         }
       ],
       "title": "Memory",
-      "type": "stat"
+      "type": "timeseries"
     },
     {
       "collapsed": false,
@@ -3335,6 +3395,6 @@
   "timezone": "",
   "title": "GreptimeDB",
   "uid": "e7097237-669b-4f8d-b751-13067afbfb68",
-  "version": 15,
+  "version": 16,
   "weekStart": ""
 }
@@ -1,3 +1,2 @@
 [toolchain]
-channel = "nightly-2024-06-06"
-
+channel = "nightly-2024-10-19"
@@ -17,10 +17,11 @@ use std::sync::Arc;
 use common_base::BitVec;
 use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
 use common_decimal::Decimal128;
-use common_time::interval::IntervalUnit;
 use common_time::time::Time;
 use common_time::timestamp::TimeUnit;
-use common_time::{Date, DateTime, Interval, Timestamp};
+use common_time::{
+    Date, DateTime, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp,
+};
 use datatypes::prelude::{ConcreteDataType, ValueRef};
 use datatypes::scalars::ScalarVector;
 use datatypes::types::{
@@ -115,6 +116,7 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
                 ConcreteDataType::binary_datatype()
             }
         }
+        ColumnDataType::Json => ConcreteDataType::json_datatype(),
        ColumnDataType::String => ConcreteDataType::string_datatype(),
         ColumnDataType::Date => ConcreteDataType::date_datatype(),
         ColumnDataType::Datetime => ConcreteDataType::datetime_datatype(),
@@ -416,6 +418,10 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
             decimal128_values: Vec::with_capacity(capacity),
             ..Default::default()
         },
+        ColumnDataType::Json => Values {
+            string_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
     }
 }

@@ -456,13 +462,11 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
             TimeUnit::Microsecond => values.time_microsecond_values.push(val.value()),
             TimeUnit::Nanosecond => values.time_nanosecond_values.push(val.value()),
         },
-        Value::Interval(val) => match val.unit() {
-            IntervalUnit::YearMonth => values.interval_year_month_values.push(val.to_i32()),
-            IntervalUnit::DayTime => values.interval_day_time_values.push(val.to_i64()),
-            IntervalUnit::MonthDayNano => values
-                .interval_month_day_nano_values
-                .push(convert_i128_to_interval(val.to_i128())),
-        },
+        Value::IntervalYearMonth(val) => values.interval_year_month_values.push(val.to_i32()),
+        Value::IntervalDayTime(val) => values.interval_day_time_values.push(val.to_i64()),
+        Value::IntervalMonthDayNano(val) => values
+            .interval_month_day_nano_values
+            .push(convert_month_day_nano_to_pb(val)),
         Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
         Value::List(_) | Value::Duration(_) => unreachable!(),
     });
@@ -507,14 +511,12 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
     }
 }

-/// Converts an i128 value to google protobuf type [IntervalMonthDayNano].
-pub fn convert_i128_to_interval(v: i128) -> v1::IntervalMonthDayNano {
-    let interval = Interval::from_i128(v);
-    let (months, days, nanoseconds) = interval.to_month_day_nano();
+/// Converts an interval to google protobuf type [IntervalMonthDayNano].
+pub fn convert_month_day_nano_to_pb(v: IntervalMonthDayNano) -> v1::IntervalMonthDayNano {
     v1::IntervalMonthDayNano {
-        months,
-        days,
-        nanoseconds,
+        months: v.months,
+        days: v.days,
+        nanoseconds: v.nanoseconds,
     }
 }

@@ -562,11 +564,15 @@ pub fn pb_value_to_value_ref<'a>(
         ValueData::TimeMillisecondValue(t) => ValueRef::Time(Time::new_millisecond(*t)),
         ValueData::TimeMicrosecondValue(t) => ValueRef::Time(Time::new_microsecond(*t)),
         ValueData::TimeNanosecondValue(t) => ValueRef::Time(Time::new_nanosecond(*t)),
-        ValueData::IntervalYearMonthValue(v) => ValueRef::Interval(Interval::from_i32(*v)),
-        ValueData::IntervalDayTimeValue(v) => ValueRef::Interval(Interval::from_i64(*v)),
+        ValueData::IntervalYearMonthValue(v) => {
+            ValueRef::IntervalYearMonth(IntervalYearMonth::from_i32(*v))
+        }
+        ValueData::IntervalDayTimeValue(v) => {
+            ValueRef::IntervalDayTime(IntervalDayTime::from_i64(*v))
+        }
         ValueData::IntervalMonthDayNanoValue(v) => {
-            let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
-            ValueRef::Interval(interval)
+            let interval = IntervalMonthDayNano::new(v.months, v.days, v.nanoseconds);
+            ValueRef::IntervalMonthDayNano(interval)
         }
         ValueData::Decimal128Value(v) => {
             // get precision and scale from datatype_extension
@@ -657,7 +663,7 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
             IntervalType::MonthDayNano(_) => {
                 Arc::new(IntervalMonthDayNanoVector::from_iter_values(
                     values.interval_month_day_nano_values.iter().map(|x| {
-                        Interval::from_month_day_nano(x.months, x.days, x.nanoseconds).to_i128()
+                        IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).to_i128()
                     }),
                 ))
             }
@@ -802,18 +808,18 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
         ConcreteDataType::Interval(IntervalType::YearMonth(_)) => values
             .interval_year_month_values
             .into_iter()
-            .map(|v| Value::Interval(Interval::from_i32(v)))
+            .map(|v| Value::IntervalYearMonth(IntervalYearMonth::from_i32(v)))
             .collect(),
         ConcreteDataType::Interval(IntervalType::DayTime(_)) => values
             .interval_day_time_values
             .into_iter()
-            .map(|v| Value::Interval(Interval::from_i64(v)))
+            .map(|v| Value::IntervalDayTime(IntervalDayTime::from_i64(v)))
             .collect(),
         ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => values
             .interval_month_day_nano_values
             .into_iter()
             .map(|v| {
-                Value::Interval(Interval::from_month_day_nano(
+                Value::IntervalMonthDayNano(IntervalMonthDayNano::new(
                     v.months,
                     v.days,
                     v.nanoseconds,
@@ -941,18 +947,16 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
                 value_data: Some(ValueData::TimeNanosecondValue(v.value())),
             },
         },
-        Value::Interval(v) => match v.unit() {
-            IntervalUnit::YearMonth => v1::Value {
-                value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
-            },
-            IntervalUnit::DayTime => v1::Value {
-                value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
-            },
-            IntervalUnit::MonthDayNano => v1::Value {
-                value_data: Some(ValueData::IntervalMonthDayNanoValue(
-                    convert_i128_to_interval(v.to_i128()),
-                )),
-            },
-        },
+        Value::IntervalYearMonth(v) => v1::Value {
+            value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
+        },
+        Value::IntervalDayTime(v) => v1::Value {
+            value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
+        },
+        Value::IntervalMonthDayNano(v) => v1::Value {
+            value_data: Some(ValueData::IntervalMonthDayNanoValue(
+                convert_month_day_nano_to_pb(v),
+            )),
+        },
         Value::Decimal128(v) => v1::Value {
             value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
@@ -1044,13 +1048,11 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
             TimeUnit::Microsecond => ValueData::TimeMicrosecondValue(v.value()),
             TimeUnit::Nanosecond => ValueData::TimeNanosecondValue(v.value()),
         }),
-        Value::Interval(v) => Some(match v.unit() {
-            IntervalUnit::YearMonth => ValueData::IntervalYearMonthValue(v.to_i32()),
-            IntervalUnit::DayTime => ValueData::IntervalDayTimeValue(v.to_i64()),
-            IntervalUnit::MonthDayNano => {
-                ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
-            }
-        }),
+        Value::IntervalYearMonth(v) => Some(ValueData::IntervalYearMonthValue(v.to_i32())),
+        Value::IntervalDayTime(v) => Some(ValueData::IntervalDayTimeValue(v.to_i64())),
+        Value::IntervalMonthDayNano(v) => Some(ValueData::IntervalMonthDayNanoValue(
+            convert_month_day_nano_to_pb(v),
+        )),
         Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
         Value::List(_) | Value::Duration(_) => unreachable!(),
     },
@@ -1061,6 +1063,7 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
 mod tests {
     use std::sync::Arc;

     use common_time::interval::IntervalUnit;
     use datatypes::types::{
         Int32Type, IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType,
         TimeMillisecondType, TimeSecondType, TimestampMillisecondType, TimestampSecondType,
@@ -1506,11 +1509,11 @@ mod tests {

     #[test]
     fn test_convert_i128_to_interval() {
-        let i128_val = 3000;
-        let interval = convert_i128_to_interval(i128_val);
+        let i128_val = 3;
+        let interval = convert_month_day_nano_to_pb(IntervalMonthDayNano::from_i128(i128_val));
         assert_eq!(interval.months, 0);
         assert_eq!(interval.days, 0);
-        assert_eq!(interval.nanoseconds, 3000);
+        assert_eq!(interval.nanoseconds, 3);
     }

     #[test]
@@ -1590,9 +1593,9 @@ mod tests {
         },
     );
     let expect = vec![
-        Value::Interval(Interval::from_year_month(1_i32)),
-        Value::Interval(Interval::from_year_month(2_i32)),
-        Value::Interval(Interval::from_year_month(3_i32)),
+        Value::IntervalYearMonth(IntervalYearMonth::new(1_i32)),
+        Value::IntervalYearMonth(IntervalYearMonth::new(2_i32)),
+        Value::IntervalYearMonth(IntervalYearMonth::new(3_i32)),
     ];
     assert_eq!(expect, actual);

@@ -1605,9 +1608,9 @@ mod tests {
         },
     );
     let expect = vec![
-        Value::Interval(Interval::from_i64(1_i64)),
-        Value::Interval(Interval::from_i64(2_i64)),
-        Value::Interval(Interval::from_i64(3_i64)),
+        Value::IntervalDayTime(IntervalDayTime::from_i64(1_i64)),
+        Value::IntervalDayTime(IntervalDayTime::from_i64(2_i64)),
+        Value::IntervalDayTime(IntervalDayTime::from_i64(3_i64)),
     ];
     assert_eq!(expect, actual);

@@ -1636,9 +1639,9 @@ mod tests {
         },
     );
     let expect = vec![
-        Value::Interval(Interval::from_month_day_nano(1, 2, 3)),
-        Value::Interval(Interval::from_month_day_nano(5, 6, 7)),
-        Value::Interval(Interval::from_month_day_nano(9, 10, 11)),
+        Value::IntervalMonthDayNano(IntervalMonthDayNano::new(1, 2, 3)),
+        Value::IntervalMonthDayNano(IntervalMonthDayNano::new(5, 6, 7)),
+        Value::IntervalMonthDayNano(IntervalMonthDayNano::new(9, 10, 11)),
     ];
     assert_eq!(expect, actual);
 }
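The hunks above retire the unit-tagged `Interval` in favor of three concrete types. A minimal sketch of the new conversion surface — the types, constructors, and protobuf fields are taken from the hunks themselves; the `api::helper` module path is assumed:

// Sketch: round-trip one interval flavor through the protobuf conversion.
use api::helper::convert_month_day_nano_to_pb; // module path assumed from this diff
use common_time::IntervalMonthDayNano;

fn main() {
    // Each flavor is now its own type; there is no `IntervalUnit` tag to match on.
    let interval = IntervalMonthDayNano::new(1, 2, 3);
    let pb = convert_month_day_nano_to_pb(interval);
    // The protobuf value carries the three fields verbatim.
    assert_eq!((pb.months, pb.days, pb.nanoseconds), (1, 2, 3));
}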
@@ -33,7 +33,7 @@ impl StaticUserProvider {
                 value: value.to_string(),
                 msg: "StaticUserProviderOption must be in format `<option>:<value>`",
             })?;
-        return match mode {
+        match mode {
             "file" => {
                 let users = load_credential_from_file(content)?
                     .context(InvalidConfigSnafu {
@@ -58,7 +58,7 @@ impl StaticUserProvider {
                 msg: "StaticUserProviderOption must be in format `file:<path>` or `cmd:<values>`",
             }
             .fail(),
-        };
+        }
     }
 }
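The fix above is the standard Rust idiom: when a `match` is the last expression of a function, the `return` keyword and trailing semicolon are redundant. A small self-contained sketch of the same shape (names are illustrative):

fn classify(mode: &str) -> u32 {
    // The bare `match` is the function's value; `return match ... ;`
    // compiles too, but clippy flags it as a needless return.
    match mode {
        "file" => 1,
        "cmd" => 2,
        _ => 0,
    }
}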
@@ -89,9 +89,8 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display("Failed to get procedure client in {mode} mode"))]
-    GetProcedureClient {
-        mode: String,
+    #[snafu(display("Failed to get information extension client"))]
+    GetInformationExtension {
         #[snafu(implicit)]
         location: Location,
     },
@@ -301,7 +300,7 @@ impl ErrorExt for Error {
             | Error::CacheNotFound { .. }
             | Error::CastManager { .. }
             | Error::Json { .. }
-            | Error::GetProcedureClient { .. }
+            | Error::GetInformationExtension { .. }
             | Error::ProcedureIdNotFound { .. } => StatusCode::Unexpected,

             Error::ViewPlanColumnsChanged { .. } => StatusCode::InvalidArguments,
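`snafu` generates a context selector (the `*Snafu` type) for every variant, which is how the new `GetInformationExtension` variant is raised at its call site in `utils.rs` later in this changeset. A hedged sketch of the mechanism with a stripped-down variant (the real one also carries an implicit `location` field):

use snafu::{OptionExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to get information extension client"))]
    GetInformationExtension,
}

// `OptionExt::context` turns a `None` into the selected error variant.
fn require(value: Option<u32>) -> Result<u32, Error> {
    value.context(GetInformationExtensionSnafu)
}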
@@ -21,7 +21,6 @@ use common_catalog::consts::{
     DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
     PG_CATALOG_NAME,
 };
-use common_config::Mode;
 use common_error::ext::BoxedError;
 use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
 use common_meta::key::catalog_name::CatalogNameKey;
@@ -34,7 +33,6 @@ use common_meta::kv_backend::KvBackendRef;
 use common_procedure::ProcedureManagerRef;
 use futures_util::stream::BoxStream;
 use futures_util::{StreamExt, TryStreamExt};
-use meta_client::client::MetaClient;
 use moka::sync::Cache;
 use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
 use session::context::{Channel, QueryContext};
@@ -50,7 +48,7 @@ use crate::error::{
     CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
     ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
 };
-use crate::information_schema::InformationSchemaProvider;
+use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
 use crate::kvbackend::TableCacheRef;
 use crate::system_schema::pg_catalog::PGCatalogProvider;
 use crate::system_schema::SystemSchemaProvider;
@@ -63,9 +61,8 @@ use crate::CatalogManager;
 /// comes from `SystemCatalog`, which is static and read-only.
 #[derive(Clone)]
 pub struct KvBackendCatalogManager {
-    mode: Mode,
-    /// Only available in `Distributed` mode.
-    meta_client: Option<Arc<MetaClient>>,
+    /// Provides the extension methods for the `information_schema` tables
+    information_extension: InformationExtensionRef,
     /// Manages partition rules.
     partition_manager: PartitionRuleManagerRef,
     /// Manages table metadata.
@@ -82,15 +79,13 @@ const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;

 impl KvBackendCatalogManager {
     pub fn new(
-        mode: Mode,
-        meta_client: Option<Arc<MetaClient>>,
+        information_extension: InformationExtensionRef,
         backend: KvBackendRef,
         cache_registry: LayeredCacheRegistryRef,
         procedure_manager: Option<ProcedureManagerRef>,
     ) -> Arc<Self> {
         Arc::new_cyclic(|me| Self {
-            mode,
-            meta_client,
+            information_extension,
             partition_manager: Arc::new(PartitionRuleManager::new(
                 backend.clone(),
                 cache_registry
@@ -118,20 +113,15 @@ impl KvBackendCatalogManager {
         })
     }

-    /// Returns the server running mode.
-    pub fn running_mode(&self) -> &Mode {
-        &self.mode
-    }
-
     pub fn view_info_cache(&self) -> Result<ViewInfoCacheRef> {
         self.cache_registry.get().context(CacheNotFoundSnafu {
             name: "view_info_cache",
         })
     }

-    /// Returns the `[MetaClient]`.
-    pub fn meta_client(&self) -> Option<Arc<MetaClient>> {
-        self.meta_client.clone()
+    /// Returns the [`InformationExtension`].
+    pub fn information_extension(&self) -> InformationExtensionRef {
+        self.information_extension.clone()
     }

     pub fn partition_manager(&self) -> PartitionRuleManagerRef {
@@ -32,7 +32,11 @@ use std::collections::HashMap;
 use std::sync::{Arc, Weak};

 use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME};
+use common_error::ext::ErrorExt;
+use common_meta::cluster::NodeInfo;
+use common_meta::datanode::RegionStat;
 use common_meta::key::flow::FlowMetadataManager;
+use common_procedure::ProcedureInfo;
 use common_recordbatch::SendableRecordBatchStream;
 use datatypes::schema::SchemaRef;
 use lazy_static::lazy_static;
@@ -45,7 +49,7 @@ use views::InformationSchemaViews;

 use self::columns::InformationSchemaColumns;
 use super::{SystemSchemaProviderInner, SystemTable, SystemTableRef};
-use crate::error::Result;
+use crate::error::{Error, Result};
 use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
 use crate::system_schema::information_schema::flows::InformationSchemaFlows;
 use crate::system_schema::information_schema::information_memory_table::get_schema_columns;
@@ -318,3 +322,39 @@
         InformationTable::to_stream(self, request)
     }
 }
+
+pub type InformationExtensionRef = Arc<dyn InformationExtension<Error = Error> + Send + Sync>;
+
+/// The `InformationExtension` trait provides the extension methods for the `information_schema` tables.
+#[async_trait::async_trait]
+pub trait InformationExtension {
+    type Error: ErrorExt;
+
+    /// Gets the nodes information.
+    async fn nodes(&self) -> std::result::Result<Vec<NodeInfo>, Self::Error>;
+
+    /// Gets the procedures information.
+    async fn procedures(&self) -> std::result::Result<Vec<(String, ProcedureInfo)>, Self::Error>;
+
+    /// Gets the region statistics.
+    async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error>;
+}
+
+pub struct NoopInformationExtension;
+
+#[async_trait::async_trait]
+impl InformationExtension for NoopInformationExtension {
+    type Error = Error;
+
+    async fn nodes(&self) -> std::result::Result<Vec<NodeInfo>, Self::Error> {
+        Ok(vec![])
+    }
+
+    async fn procedures(&self) -> std::result::Result<Vec<(String, ProcedureInfo)>, Self::Error> {
+        Ok(vec![])
+    }
+
+    async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error> {
+        Ok(vec![])
+    }
+}
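Since `InformationExtension` is the new seam between the `information_schema` tables and whatever backs them, a deployment can plug in its own source. A sketch of a custom implementation, modeled on `NoopInformationExtension` above — the trait, error type, and import paths are taken from this changeset; the struct itself is illustrative:

use catalog::error::Error;
use catalog::information_schema::InformationExtension;
use common_meta::cluster::NodeInfo;
use common_meta::datanode::RegionStat;
use common_procedure::ProcedureInfo;

pub struct EmptyInformationExtension;

#[async_trait::async_trait]
impl InformationExtension for EmptyInformationExtension {
    type Error = Error;

    async fn nodes(&self) -> std::result::Result<Vec<NodeInfo>, Self::Error> {
        // A real implementation asks the meta server (distributed) or the
        // local region server (standalone); see the impls later in this diff.
        Ok(vec![])
    }

    async fn procedures(&self) -> std::result::Result<Vec<(String, ProcedureInfo)>, Self::Error> {
        Ok(vec![])
    }

    async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error> {
        Ok(vec![])
    }
}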
@@ -17,13 +17,10 @@ use std::time::Duration;

 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID;
-use common_config::Mode;
-use common_error::ext::BoxedError;
-use common_meta::cluster::{ClusterInfo, NodeInfo, NodeStatus};
-use common_meta::peer::Peer;
+use common_meta::cluster::NodeInfo;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_telemetry::warn;
 use common_time::timestamp::Timestamp;
 use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
@@ -40,7 +37,7 @@ use snafu::ResultExt;
 use store_api::storage::{ScanRequest, TableId};

 use super::CLUSTER_INFO;
-use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListNodesSnafu, Result};
+use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
 use crate::system_schema::information_schema::{InformationTable, Predicates};
 use crate::system_schema::utils;
 use crate::CatalogManager;
@@ -70,7 +67,6 @@ const INIT_CAPACITY: usize = 42;
 pub(super) struct InformationSchemaClusterInfo {
     schema: SchemaRef,
     catalog_manager: Weak<dyn CatalogManager>,
-    start_time_ms: u64,
 }

 impl InformationSchemaClusterInfo {
@@ -78,7 +74,6 @@ impl InformationSchemaClusterInfo {
         Self {
             schema: Self::schema(),
             catalog_manager,
-            start_time_ms: common_time::util::current_time_millis() as u64,
         }
     }

@@ -100,11 +95,7 @@ impl InformationSchemaClusterInfo {
     }

     fn builder(&self) -> InformationSchemaClusterInfoBuilder {
-        InformationSchemaClusterInfoBuilder::new(
-            self.schema.clone(),
-            self.catalog_manager.clone(),
-            self.start_time_ms,
-        )
+        InformationSchemaClusterInfoBuilder::new(self.schema.clone(), self.catalog_manager.clone())
     }
 }

@@ -144,7 +135,6 @@ impl InformationTable for InformationSchemaClusterInfo {

 struct InformationSchemaClusterInfoBuilder {
     schema: SchemaRef,
-    start_time_ms: u64,
     catalog_manager: Weak<dyn CatalogManager>,

     peer_ids: Int64VectorBuilder,
@@ -158,11 +148,7 @@ struct InformationSchemaClusterInfoBuilder {
 }

 impl InformationSchemaClusterInfoBuilder {
-    fn new(
-        schema: SchemaRef,
-        catalog_manager: Weak<dyn CatalogManager>,
-        start_time_ms: u64,
-    ) -> Self {
+    fn new(schema: SchemaRef, catalog_manager: Weak<dyn CatalogManager>) -> Self {
         Self {
             schema,
             catalog_manager,
@@ -174,56 +160,17 @@ impl InformationSchemaClusterInfoBuilder {
             start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
             uptimes: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             active_times: StringVectorBuilder::with_capacity(INIT_CAPACITY),
-            start_time_ms,
         }
     }

     /// Construct the `information_schema.cluster_info` virtual table
     async fn make_cluster_info(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
         let predicates = Predicates::from_scan_request(&request);
-        let mode = utils::running_mode(&self.catalog_manager)?.unwrap_or(Mode::Standalone);
-
-        match mode {
-            Mode::Standalone => {
-                let build_info = common_version::build_info();
-
-                self.add_node_info(
-                    &predicates,
-                    NodeInfo {
-                        // For the standalone:
-                        // - id always 0
-                        // - empty string for peer_addr
-                        peer: Peer {
-                            id: 0,
-                            addr: "".to_string(),
-                        },
-                        last_activity_ts: -1,
-                        status: NodeStatus::Standalone,
-                        version: build_info.version.to_string(),
-                        git_commit: build_info.commit_short.to_string(),
-                        // Use `self.start_time_ms` instead.
-                        // It's not precise but enough.
-                        start_time_ms: self.start_time_ms,
-                    },
-                );
-            }
-            Mode::Distributed => {
-                if let Some(meta_client) = utils::meta_client(&self.catalog_manager)? {
-                    let node_infos = meta_client
-                        .list_nodes(None)
-                        .await
-                        .map_err(BoxedError::new)
-                        .context(ListNodesSnafu)?;
-
-                    for node_info in node_infos {
-                        self.add_node_info(&predicates, node_info);
-                    }
-                } else {
-                    warn!("Could not find meta client in distributed mode.");
-                }
-            }
-        }
+        let information_extension = utils::information_extension(&self.catalog_manager)?;
+        let node_infos = information_extension.nodes().await?;
+        for node_info in node_infos {
+            self.add_node_info(&predicates, node_info);
+        }

         self.finish()
     }
@@ -14,14 +14,10 @@

 use std::sync::{Arc, Weak};

-use api::v1::meta::{ProcedureMeta, ProcedureStatus};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_PROCEDURE_INFO_TABLE_ID;
-use common_config::Mode;
-use common_error::ext::BoxedError;
-use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
-use common_meta::rpc::procedure;
-use common_procedure::{ProcedureInfo, ProcedureState};
+use common_procedure::ProcedureInfo;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_time::timestamp::Timestamp;
@@ -38,10 +34,7 @@ use snafu::ResultExt;
 use store_api::storage::{ScanRequest, TableId};

 use super::PROCEDURE_INFO;
-use crate::error::{
-    ConvertProtoDataSnafu, CreateRecordBatchSnafu, GetProcedureClientSnafu, InternalSnafu,
-    ListProceduresSnafu, ProcedureIdNotFoundSnafu, Result,
-};
+use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
 use crate::system_schema::information_schema::{InformationTable, Predicates};
 use crate::system_schema::utils;
 use crate::CatalogManager;
@@ -167,45 +160,11 @@ impl InformationSchemaProcedureInfoBuilder {
     /// Construct the `information_schema.procedure_info` virtual table
     async fn make_procedure_info(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
         let predicates = Predicates::from_scan_request(&request);
-        let mode = utils::running_mode(&self.catalog_manager)?.unwrap_or(Mode::Standalone);
-        match mode {
-            Mode::Standalone => {
-                if let Some(procedure_manager) = utils::procedure_manager(&self.catalog_manager)? {
-                    let procedures = procedure_manager
-                        .list_procedures()
-                        .await
-                        .map_err(BoxedError::new)
-                        .context(ListProceduresSnafu)?;
-                    for procedure in procedures {
-                        self.add_procedure(
-                            &predicates,
-                            procedure.state.as_str_name().to_string(),
-                            procedure,
-                        );
-                    }
-                } else {
-                    return GetProcedureClientSnafu { mode: "standalone" }.fail();
-                }
-            }
-            Mode::Distributed => {
-                if let Some(meta_client) = utils::meta_client(&self.catalog_manager)? {
-                    let procedures = meta_client
-                        .list_procedures(&ExecutorContext::default())
-                        .await
-                        .map_err(BoxedError::new)
-                        .context(ListProceduresSnafu)?;
-                    for procedure in procedures.procedures {
-                        self.add_procedure_info(&predicates, procedure)?;
-                    }
-                } else {
-                    return GetProcedureClientSnafu {
-                        mode: "distributed",
-                    }
-                    .fail();
-                }
-            }
-        };
-
+        let information_extension = utils::information_extension(&self.catalog_manager)?;
+        let procedures = information_extension.procedures().await?;
+        for (status, procedure_info) in procedures {
+            self.add_procedure(&predicates, status, procedure_info);
+        }
         self.finish()
     }

@@ -247,34 +206,6 @@ impl InformationSchemaProcedureInfoBuilder {
         self.lock_keys.push(Some(&lock_keys));
     }

-    fn add_procedure_info(
-        &mut self,
-        predicates: &Predicates,
-        procedure: ProcedureMeta,
-    ) -> Result<()> {
-        let pid = match procedure.id {
-            Some(pid) => pid,
-            None => return ProcedureIdNotFoundSnafu {}.fail(),
-        };
-        let pid = procedure::pb_pid_to_pid(&pid)
-            .map_err(BoxedError::new)
-            .context(ConvertProtoDataSnafu)?;
-        let status = ProcedureStatus::try_from(procedure.status)
-            .map(|v| v.as_str_name())
-            .unwrap_or("Unknown")
-            .to_string();
-        let procedure_info = ProcedureInfo {
-            id: pid,
-            type_name: procedure.type_name,
-            start_time_ms: procedure.start_time_ms,
-            end_time_ms: procedure.end_time_ms,
-            state: ProcedureState::Running,
-            lock_keys: procedure.lock_keys,
-        };
-        self.add_procedure(predicates, status, procedure_info);
-        Ok(())
-    }
-
     fn finish(&mut self) -> Result<RecordBatch> {
         let columns: Vec<VectorRef> = vec![
             Arc::new(self.procedure_ids.finish()),
@@ -16,13 +16,10 @@ use std::sync::{Arc, Weak};

 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_REGION_STATISTICS_TABLE_ID;
-use common_config::Mode;
-use common_error::ext::BoxedError;
-use common_meta::cluster::ClusterInfo;
 use common_meta::datanode::RegionStat;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
-use common_telemetry::tracing::warn;
 use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
@@ -34,7 +31,7 @@ use snafu::ResultExt;
 use store_api::storage::{ScanRequest, TableId};

 use super::{InformationTable, REGION_STATISTICS};
-use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListRegionStatsSnafu, Result};
+use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
 use crate::information_schema::Predicates;
 use crate::system_schema::utils;
 use crate::CatalogManager;
@@ -42,9 +39,12 @@ use crate::CatalogManager;
 const REGION_ID: &str = "region_id";
 const TABLE_ID: &str = "table_id";
 const REGION_NUMBER: &str = "region_number";
+const REGION_ROWS: &str = "region_rows";
 const DISK_SIZE: &str = "disk_size";
 const MEMTABLE_SIZE: &str = "memtable_size";
 const MANIFEST_SIZE: &str = "manifest_size";
 const SST_SIZE: &str = "sst_size";
+const INDEX_SIZE: &str = "index_size";
 const ENGINE: &str = "engine";
 const REGION_ROLE: &str = "region_role";

@@ -55,9 +55,12 @@ const INIT_CAPACITY: usize = 42;
 /// - `region_id`: The region id.
 /// - `table_id`: The table id.
 /// - `region_number`: The region number.
+/// - `region_rows`: The number of rows in region.
 /// - `memtable_size`: The memtable size in bytes.
 /// - `disk_size`: The approximate disk size in bytes.
 /// - `manifest_size`: The manifest size in bytes.
-/// - `sst_size`: The sst size in bytes.
+/// - `sst_size`: The sst data files size in bytes.
+/// - `index_size`: The sst index files size in bytes.
 /// - `engine`: The engine type.
 /// - `region_role`: The region role.
 ///
@@ -79,9 +82,12 @@ impl InformationSchemaRegionStatistics {
             ColumnSchema::new(REGION_ID, ConcreteDataType::uint64_datatype(), false),
             ColumnSchema::new(TABLE_ID, ConcreteDataType::uint32_datatype(), false),
             ColumnSchema::new(REGION_NUMBER, ConcreteDataType::uint32_datatype(), false),
+            ColumnSchema::new(REGION_ROWS, ConcreteDataType::uint64_datatype(), true),
             ColumnSchema::new(DISK_SIZE, ConcreteDataType::uint64_datatype(), true),
             ColumnSchema::new(MEMTABLE_SIZE, ConcreteDataType::uint64_datatype(), true),
             ColumnSchema::new(MANIFEST_SIZE, ConcreteDataType::uint64_datatype(), true),
             ColumnSchema::new(SST_SIZE, ConcreteDataType::uint64_datatype(), true),
+            ColumnSchema::new(INDEX_SIZE, ConcreteDataType::uint64_datatype(), true),
             ColumnSchema::new(ENGINE, ConcreteDataType::string_datatype(), true),
             ColumnSchema::new(REGION_ROLE, ConcreteDataType::string_datatype(), true),
         ]))
@@ -138,9 +144,12 @@ struct InformationSchemaRegionStatisticsBuilder {
     region_ids: UInt64VectorBuilder,
     table_ids: UInt32VectorBuilder,
     region_numbers: UInt32VectorBuilder,
+    region_rows: UInt64VectorBuilder,
     disk_sizes: UInt64VectorBuilder,
     memtable_sizes: UInt64VectorBuilder,
     manifest_sizes: UInt64VectorBuilder,
     sst_sizes: UInt64VectorBuilder,
+    index_sizes: UInt64VectorBuilder,
     engines: StringVectorBuilder,
     region_roles: StringVectorBuilder,
 }
@@ -153,9 +162,12 @@ impl InformationSchemaRegionStatisticsBuilder {
             region_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             table_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
             region_numbers: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
+            region_rows: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             disk_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             memtable_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             manifest_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             sst_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
+            index_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             engines: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             region_roles: StringVectorBuilder::with_capacity(INIT_CAPACITY),
         }
@@ -167,28 +179,11 @@ impl InformationSchemaRegionStatisticsBuilder {
         request: Option<ScanRequest>,
     ) -> Result<RecordBatch> {
         let predicates = Predicates::from_scan_request(&request);
-        let mode = utils::running_mode(&self.catalog_manager)?.unwrap_or(Mode::Standalone);
-
-        match mode {
-            Mode::Standalone => {
-                // TODO(weny): implement it
-            }
-            Mode::Distributed => {
-                if let Some(meta_client) = utils::meta_client(&self.catalog_manager)? {
-                    let region_stats = meta_client
-                        .list_region_stats()
-                        .await
-                        .map_err(BoxedError::new)
-                        .context(ListRegionStatsSnafu)?;
-                    for region_stat in region_stats {
-                        self.add_region_statistic(&predicates, region_stat);
-                    }
-                } else {
-                    warn!("Meta client is not available");
-                }
-            }
-        }
+        let information_extension = utils::information_extension(&self.catalog_manager)?;
+        let region_stats = information_extension.region_stats().await?;
+        for region_stat in region_stats {
+            self.add_region_statistic(&predicates, region_stat);
+        }

         self.finish()
     }

@@ -197,9 +192,12 @@ impl InformationSchemaRegionStatisticsBuilder {
             (REGION_ID, &Value::from(region_stat.id.as_u64())),
             (TABLE_ID, &Value::from(region_stat.id.table_id())),
             (REGION_NUMBER, &Value::from(region_stat.id.region_number())),
+            (REGION_ROWS, &Value::from(region_stat.num_rows)),
             (DISK_SIZE, &Value::from(region_stat.approximate_bytes)),
             (MEMTABLE_SIZE, &Value::from(region_stat.memtable_size)),
             (MANIFEST_SIZE, &Value::from(region_stat.manifest_size)),
             (SST_SIZE, &Value::from(region_stat.sst_size)),
+            (INDEX_SIZE, &Value::from(region_stat.index_size)),
             (ENGINE, &Value::from(region_stat.engine.as_str())),
             (REGION_ROLE, &Value::from(region_stat.role.to_string())),
         ];
@@ -212,9 +210,12 @@ impl InformationSchemaRegionStatisticsBuilder {
         self.table_ids.push(Some(region_stat.id.table_id()));
         self.region_numbers
             .push(Some(region_stat.id.region_number()));
+        self.region_rows.push(Some(region_stat.num_rows));
         self.disk_sizes.push(Some(region_stat.approximate_bytes));
         self.memtable_sizes.push(Some(region_stat.memtable_size));
         self.manifest_sizes.push(Some(region_stat.manifest_size));
         self.sst_sizes.push(Some(region_stat.sst_size));
+        self.index_sizes.push(Some(region_stat.index_size));
         self.engines.push(Some(&region_stat.engine));
         self.region_roles.push(Some(&region_stat.role.to_string()));
     }
@@ -224,9 +225,12 @@ impl InformationSchemaRegionStatisticsBuilder {
             Arc::new(self.region_ids.finish()),
             Arc::new(self.table_ids.finish()),
             Arc::new(self.region_numbers.finish()),
+            Arc::new(self.region_rows.finish()),
             Arc::new(self.disk_sizes.finish()),
             Arc::new(self.memtable_sizes.finish()),
             Arc::new(self.manifest_sizes.finish()),
             Arc::new(self.sst_sizes.finish()),
+            Arc::new(self.index_sizes.finish()),
             Arc::new(self.engines.finish()),
             Arc::new(self.region_roles.finish()),
         ];
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-/// All table names in `information_schema`.
+//! All table names in `information_schema`.

 pub const TABLES: &str = "tables";
 pub const COLUMNS: &str = "columns";
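The one-character change above matters: `///` attaches documentation to the next item, while `//!` documents the enclosing module, which is what a file-header comment is meant to do. In miniature:

//! All table names in `information_schema`.
//!
//! Inner doc comments (`//!`) describe the module that contains them, so
//! rustdoc renders this text on the module page rather than on a stray item.

/// Outer doc comments (`///`) describe the item that follows.
pub const TABLES: &str = "tables";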
@@ -74,7 +74,7 @@ impl MemoryTableBuilder {
     /// Construct the `information_schema.{table_name}` virtual table
     pub async fn memory_records(&mut self) -> Result<RecordBatch> {
         if self.columns.is_empty() {
-            RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
+            Ok(RecordBatch::new_empty(self.schema.clone()))
         } else {
             RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
                 .context(CreateRecordBatchSnafu)
@@ -12,6 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//! The `pg_catalog.pg_namespace` table implementation.
+//! namespace is a schema in greptime
+
 pub(super) mod oid_map;

 use std::sync::{Arc, Weak};
@@ -40,9 +43,6 @@ use crate::system_schema::utils::tables::{string_column, u32_column};
 use crate::system_schema::SystemTable;
 use crate::CatalogManager;

-/// The `pg_catalog.pg_namespace` table implementation.
-/// namespace is a schema in greptime
-
 const NSPNAME: &str = "nspname";
 const INIT_CAPACITY: usize = 42;
@@ -12,48 +12,33 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-pub mod tables;
-use std::sync::Weak;
+use std::sync::{Arc, Weak};

-use common_config::Mode;
 use common_meta::key::TableMetadataManagerRef;
-use common_procedure::ProcedureManagerRef;
-use meta_client::client::MetaClient;
 use snafu::OptionExt;

-use crate::error::{Result, UpgradeWeakCatalogManagerRefSnafu};
+use crate::error::{GetInformationExtensionSnafu, Result, UpgradeWeakCatalogManagerRefSnafu};
+use crate::information_schema::InformationExtensionRef;
 use crate::kvbackend::KvBackendCatalogManager;
 use crate::CatalogManager;

-/// Try to get the server running mode from `[CatalogManager]` weak reference.
-pub fn running_mode(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<Mode>> {
+pub mod tables;
+
+/// Try to get the `[InformationExtension]` from `[CatalogManager]` weak reference.
+pub fn information_extension(
+    catalog_manager: &Weak<dyn CatalogManager>,
+) -> Result<InformationExtensionRef> {
     let catalog_manager = catalog_manager
         .upgrade()
         .context(UpgradeWeakCatalogManagerRefSnafu)?;

-    Ok(catalog_manager
+    let information_extension = catalog_manager
         .as_any()
         .downcast_ref::<KvBackendCatalogManager>()
-        .map(|manager| manager.running_mode())
-        .copied())
-}
+        .map(|manager| manager.information_extension())
+        .context(GetInformationExtensionSnafu)?;

-/// Try to get the `[MetaClient]` from `[CatalogManager]` weak reference.
-pub fn meta_client(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<Arc<MetaClient>>> {
-    let catalog_manager = catalog_manager
-        .upgrade()
-        .context(UpgradeWeakCatalogManagerRefSnafu)?;
-
-    let meta_client = match catalog_manager
-        .as_any()
-        .downcast_ref::<KvBackendCatalogManager>()
-    {
-        None => None,
-        Some(manager) => manager.meta_client(),
-    };
-
-    Ok(meta_client)
+    Ok(information_extension)
 }

 /// Try to get the `[TableMetadataManagerRef]` from `[CatalogManager]` weak reference.
@@ -69,17 +54,3 @@ pub fn table_meta_manager(
         .downcast_ref::<KvBackendCatalogManager>()
         .map(|manager| manager.table_metadata_manager_ref().clone()))
 }
-
-/// Try to get the `[ProcedureManagerRef]` from `[CatalogManager]` weak reference.
-pub fn procedure_manager(
-    catalog_manager: &Weak<dyn CatalogManager>,
-) -> Result<Option<ProcedureManagerRef>> {
-    let catalog_manager = catalog_manager
-        .upgrade()
-        .context(UpgradeWeakCatalogManagerRefSnafu)?;
-
-    Ok(catalog_manager
-        .as_any()
-        .downcast_ref::<KvBackendCatalogManager>()
-        .and_then(|manager| manager.procedure_manager()))
-}
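All of these helpers share one lookup shape: upgrade the `Weak<dyn CatalogManager>`, then downcast the trait object to the concrete `KvBackendCatalogManager`. A self-contained sketch of that pattern with stand-in types (only the shape is taken from the code above):

use std::any::Any;
use std::sync::{Arc, Weak};

trait Manager: Send + Sync {
    fn as_any(&self) -> &dyn Any;
}

struct KvManager {
    name: String,
}

impl Manager for KvManager {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

fn lookup(weak: &Weak<dyn Manager>) -> Option<String> {
    // Upgrading fails if every `Arc` to the manager has been dropped.
    let strong: Arc<dyn Manager> = weak.upgrade()?;
    // Downcasting fails if a different `Manager` implementation is running.
    strong
        .as_any()
        .downcast_ref::<KvManager>()
        .map(|m| m.name.clone())
}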
@@ -259,7 +259,6 @@ mod tests {

     use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
     use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
-    use common_config::Mode;
     use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
     use common_meta::key::TableMetadataManager;
     use common_meta::kv_backend::memory::MemoryKvBackend;
@@ -269,6 +268,8 @@ mod tests {
     use datafusion::logical_expr::builder::LogicalTableSource;
     use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder};

+    use crate::information_schema::NoopInformationExtension;
+
     struct MockDecoder;
     impl MockDecoder {
         pub fn arc() -> Arc<Self> {
@@ -323,8 +324,7 @@ mod tests {
         );

         let catalog_manager = KvBackendCatalogManager::new(
-            Mode::Standalone,
-            None,
+            Arc::new(NoopInformationExtension),
             backend.clone(),
             layered_cache_registry,
             None,
@@ -28,7 +28,7 @@ enum_dispatch = "0.3"
 futures-util.workspace = true
 lazy_static.workspace = true
 moka = { workspace = true, features = ["future"] }
-parking_lot = "0.12"
+parking_lot.workspace = true
 prometheus.workspace = true
 prost.workspace = true
 query.workspace = true
@@ -45,7 +45,6 @@ common-grpc-expr.workspace = true
 datanode.workspace = true
 derive-new = "0.5"
-tracing = "0.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }

 [dev-dependencies.substrait_proto]
 package = "substrait"
@@ -10,7 +10,7 @@ name = "greptime"
 path = "src/bin/greptime.rs"

 [features]
-default = ["python"]
+default = ["python", "servers/pprof", "servers/mem-prof"]
 tokio-console = ["common-telemetry/tokio-console"]
 python = ["frontend/python"]
@@ -78,7 +78,7 @@ table.workspace = true
 tokio.workspace = true
 toml.workspace = true
 tonic.workspace = true
-tracing-appender = "0.2"
+tracing-appender.workspace = true

 [target.'cfg(not(windows))'.dependencies]
 tikv-jemallocator = "0.6"
@@ -46,12 +46,12 @@ use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
 use crate::cli::cmd::ReplCommand;
 use crate::cli::helper::RustylineHelper;
 use crate::cli::AttachCommand;
-use crate::error;
 use crate::error::{
     CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
     ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
     SubstraitEncodeLogicalPlanSnafu,
 };
+use crate::{error, DistributedInformationExtension};

 /// Captures the state of the repl, gathers commands and executes them one by one
 pub struct Repl {
@@ -174,7 +174,7 @@ impl Repl {

         let plan = query_engine
             .planner()
-            .plan(stmt, query_ctx.clone())
+            .plan(&stmt, query_ctx.clone())
             .await
             .context(PlanStatementSnafu)?;

@@ -275,9 +275,9 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
             .build(),
     );

+    let information_extension = Arc::new(DistributedInformationExtension::new(meta_client.clone()));
     let catalog_manager = KvBackendCatalogManager::new(
-        Mode::Distributed,
-        Some(meta_client.clone()),
+        information_extension,
         cached_meta_backend.clone(),
         layered_cache_registry,
         None,
@@ -272,9 +272,10 @@ impl StartCommand {
         info!("Datanode start command: {:#?}", self);
         info!("Datanode options: {:#?}", opts);

+        let plugin_opts = opts.plugins;
         let opts = opts.component;
         let mut plugins = Plugins::new();
-        plugins::setup_datanode_plugins(&mut plugins, &opts)
+        plugins::setup_datanode_plugins(&mut plugins, &plugin_opts, &opts)
             .await
             .context(StartDatanodeSnafu)?;
@@ -41,7 +41,7 @@ use crate::error::{
     MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
 };
 use crate::options::{GlobalOptions, GreptimeOptions};
-use crate::{log_versions, App};
+use crate::{log_versions, App, DistributedInformationExtension};

 pub const APP_NAME: &str = "greptime-flownode";

@@ -269,9 +269,10 @@ impl StartCommand {
             .build(),
         );

+        let information_extension =
+            Arc::new(DistributedInformationExtension::new(meta_client.clone()));
         let catalog_manager = KvBackendCatalogManager::new(
-            opts.mode,
-            Some(meta_client.clone()),
+            information_extension,
             cached_meta_backend.clone(),
             layered_cache_registry.clone(),
             None,
@@ -38,7 +38,6 @@ use frontend::server::Services;
 use meta_client::{MetaClientOptions, MetaClientType};
 use query::stats::StatementStatistics;
 use servers::tls::{TlsMode, TlsOption};
-use servers::Mode;
 use snafu::{OptionExt, ResultExt};
 use tracing_appender::non_blocking::WorkerGuard;

@@ -47,7 +46,7 @@ use crate::error::{
     Result, StartFrontendSnafu,
 };
 use crate::options::{GlobalOptions, GreptimeOptions};
-use crate::{log_versions, App};
+use crate::{log_versions, App, DistributedInformationExtension};

 type FrontendOptions = GreptimeOptions<frontend::frontend::FrontendOptions>;

@@ -267,9 +266,10 @@ impl StartCommand {
         info!("Frontend start command: {:#?}", self);
         info!("Frontend options: {:#?}", opts);

+        let plugin_opts = opts.plugins;
         let opts = opts.component;
         let mut plugins = Plugins::new();
-        plugins::setup_frontend_plugins(&mut plugins, &opts)
+        plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &opts)
             .await
             .context(StartFrontendSnafu)?;

@@ -316,9 +316,10 @@ impl StartCommand {
             .build(),
         );

+        let information_extension =
+            Arc::new(DistributedInformationExtension::new(meta_client.clone()));
         let catalog_manager = KvBackendCatalogManager::new(
-            Mode::Distributed,
-            Some(meta_client.clone()),
+            information_extension,
             cached_meta_backend.clone(),
             layered_cache_registry.clone(),
             None,
@@ -342,6 +343,8 @@ impl StartCommand {
         // Some queries are expected to take long time.
         let channel_config = ChannelConfig {
             timeout: None,
+            tcp_nodelay: opts.datanode.client.tcp_nodelay,
+            connect_timeout: Some(opts.datanode.client.connect_timeout),
             ..Default::default()
         };
         let client = NodeClients::new(channel_config);
@@ -472,7 +475,7 @@ mod tests {
         };

         let mut plugins = Plugins::new();
-        plugins::setup_frontend_plugins(&mut plugins, &fe_opts)
+        plugins::setup_frontend_plugins(&mut plugins, &[], &fe_opts)
             .await
             .unwrap();
@@ -15,7 +15,17 @@
 #![feature(assert_matches, let_chains)]

 use async_trait::async_trait;
+use catalog::information_schema::InformationExtension;
+use client::api::v1::meta::ProcedureStatus;
+use common_error::ext::BoxedError;
+use common_meta::cluster::{ClusterInfo, NodeInfo};
+use common_meta::datanode::RegionStat;
+use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
+use common_meta::rpc::procedure;
+use common_procedure::{ProcedureInfo, ProcedureState};
 use common_telemetry::{error, info};
+use meta_client::MetaClientRef;
+use snafu::ResultExt;

 use crate::error::Result;

@@ -74,6 +84,7 @@ pub trait App: Send {
 }

 /// Log the versions of the application, and the arguments passed to the cli.
+///
 /// `version` should be the same as the output of cli "--version";
 /// and the `short_version` is the short version of the codes, often consist of git branch and commit.
 pub fn log_versions(version: &str, short_version: &str, app: &str) {
@@ -94,3 +105,69 @@ fn log_env_flags() {
         info!("argument: {}", argument);
     }
 }
+
+pub struct DistributedInformationExtension {
+    meta_client: MetaClientRef,
+}
+
+impl DistributedInformationExtension {
+    pub fn new(meta_client: MetaClientRef) -> Self {
+        Self { meta_client }
+    }
+}
+
+#[async_trait::async_trait]
+impl InformationExtension for DistributedInformationExtension {
+    type Error = catalog::error::Error;
+
+    async fn nodes(&self) -> std::result::Result<Vec<NodeInfo>, Self::Error> {
+        self.meta_client
+            .list_nodes(None)
+            .await
+            .map_err(BoxedError::new)
+            .context(catalog::error::ListNodesSnafu)
+    }
+
+    async fn procedures(&self) -> std::result::Result<Vec<(String, ProcedureInfo)>, Self::Error> {
+        let procedures = self
+            .meta_client
+            .list_procedures(&ExecutorContext::default())
+            .await
+            .map_err(BoxedError::new)
+            .context(catalog::error::ListProceduresSnafu)?
+            .procedures;
+        let mut result = Vec::with_capacity(procedures.len());
+        for procedure in procedures {
+            let pid = match procedure.id {
+                Some(pid) => pid,
+                None => return catalog::error::ProcedureIdNotFoundSnafu {}.fail(),
+            };
+            let pid = procedure::pb_pid_to_pid(&pid)
+                .map_err(BoxedError::new)
+                .context(catalog::error::ConvertProtoDataSnafu)?;
+            let status = ProcedureStatus::try_from(procedure.status)
+                .map(|v| v.as_str_name())
+                .unwrap_or("Unknown")
+                .to_string();
+            let procedure_info = ProcedureInfo {
+                id: pid,
+                type_name: procedure.type_name,
+                start_time_ms: procedure.start_time_ms,
+                end_time_ms: procedure.end_time_ms,
+                state: ProcedureState::Running,
+                lock_keys: procedure.lock_keys,
+            };
+            result.push((status, procedure_info));
+        }

+        Ok(result)
+    }
+
+    async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error> {
+        self.meta_client
+            .list_region_stats()
+            .await
+            .map_err(BoxedError::new)
+            .context(catalog::error::ListRegionStatsSnafu)
+    }
+}
@@ -48,6 +48,10 @@ impl Instance {
             _guard: guard,
         }
     }
+
+    pub fn get_inner(&self) -> &MetasrvInstance {
+        &self.instance
+    }
 }

 #[async_trait]
@@ -86,6 +90,14 @@ impl Command {
     pub fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
         self.subcmd.load_options(global_options)
     }
+
+    pub fn config_file(&self) -> &Option<String> {
+        self.subcmd.config_file()
+    }
+
+    pub fn env_prefix(&self) -> &String {
+        self.subcmd.env_prefix()
+    }
 }

 #[derive(Parser)]
@@ -105,6 +117,18 @@ impl SubCommand {
             SubCommand::Start(cmd) => cmd.load_options(global_options),
         }
     }
+
+    fn config_file(&self) -> &Option<String> {
+        match self {
+            SubCommand::Start(cmd) => &cmd.config_file,
+        }
+    }
+
+    fn env_prefix(&self) -> &String {
+        match self {
+            SubCommand::Start(cmd) => &cmd.env_prefix,
+        }
+    }
 }

 #[derive(Debug, Default, Parser)]
@@ -249,9 +273,10 @@ impl StartCommand {
         info!("Metasrv start command: {:#?}", self);
         info!("Metasrv options: {:#?}", opts);

+        let plugin_opts = opts.plugins;
         let opts = opts.component;
         let mut plugins = Plugins::new();
-        plugins::setup_metasrv_plugins(&mut plugins, &opts)
+        plugins::setup_metasrv_plugins(&mut plugins, &plugin_opts, &opts)
             .await
             .context(StartMetaServerSnafu)?;
@@ -15,6 +15,7 @@
 use clap::Parser;
 use common_config::Configurable;
 use common_runtime::global::RuntimeOptions;
+use plugins::PluginOptions;
 use serde::{Deserialize, Serialize};

 #[derive(Parser, Default, Debug, Clone)]
@@ -40,6 +41,8 @@ pub struct GlobalOptions {
 pub struct GreptimeOptions<T> {
     /// The runtime options.
     pub runtime: RuntimeOptions,
+    /// The plugin options.
+    pub plugins: Vec<PluginOptions>,

     /// The options of each component (like Datanode or Standalone) of GreptimeDB.
     #[serde(flatten)]
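Because the component options are `#[serde(flatten)]`-ed, the new `plugins` field simply becomes an extra top-level key next to the component's own keys. A runnable sketch with stand-in types (assuming the `serde` and `toml` crates; the real `PluginOptions` lives in the `plugins` crate and has a different shape):

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct PluginOptions {
    name: String,
}

#[derive(Debug, Deserialize)]
struct ComponentOptions {
    http_addr: String,
}

#[derive(Debug, Deserialize)]
struct GreptimeOptions {
    #[serde(default)]
    plugins: Vec<PluginOptions>,
    #[serde(flatten)]
    component: ComponentOptions,
}

fn main() {
    // Top-level `http_addr` lands in the flattened component; `[[plugins]]`
    // fills the sibling `plugins` list.
    let config = "http_addr = \"127.0.0.1:4000\"\n\n[[plugins]]\nname = \"demo\"\n";
    let opts: GreptimeOptions = toml::from_str(config).unwrap();
    assert_eq!(opts.plugins.len(), 1);
    assert_eq!(opts.component.http_addr, "127.0.0.1:4000");
}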
@@ -12,19 +12,24 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use std::{fs, path};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use catalog::information_schema::InformationExtension;
|
||||
use catalog::kvbackend::KvBackendCatalogManager;
|
||||
use clap::Parser;
|
||||
use client::api::v1::meta::RegionRole;
|
||||
use common_base::Plugins;
|
||||
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
|
||||
use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache::LayeredCacheRegistryBuilder;
|
||||
use common_meta::cache_invalidator::CacheInvalidatorRef;
|
||||
use common_meta::cluster::{NodeInfo, NodeStatus};
|
||||
use common_meta::datanode::RegionStat;
|
||||
use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRef};
|
||||
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
|
||||
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl, ProcedureExecutorRef};
|
||||
@@ -33,10 +38,11 @@ use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::node_manager::NodeManagerRef;
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::region_keeper::MemoryRegionKeeper;
|
||||
use common_meta::sequence::SequenceBuilder;
|
||||
use common_meta::wal_options_allocator::{WalOptionsAllocator, WalOptionsAllocatorRef};
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use common_procedure::{ProcedureInfo, ProcedureManagerRef};
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::{LoggingOptions, TracingOptions};
|
||||
use common_time::timezone::set_default_timezone;
|
||||
@@ -44,6 +50,7 @@ use common_version::{short_version, version};
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
|
||||
use datanode::datanode::{Datanode, DatanodeBuilder};
|
||||
use datanode::region_server::RegionServer;
|
||||
use file_engine::config::EngineConfig as FileEngineConfig;
|
||||
use flow::{FlowWorkerManager, FlownodeBuilder, FrontendInvoker};
|
||||
use frontend::frontend::FrontendOptions;
|
||||
@@ -244,6 +251,13 @@ pub struct Instance {
|
||||
_guard: Vec<WorkerGuard>,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
/// Find the socket addr of a server by its `name`.
|
||||
pub async fn server_addr(&self, name: &str) -> Option<SocketAddr> {
|
||||
self.frontend.server_handlers().addr(name).await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl App for Instance {
|
||||
fn name(&self) -> &str {
|
||||
@@ -334,7 +348,8 @@ pub struct StartCommand {
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
fn load_options(
|
||||
/// Load the GreptimeDB options from various sources (command line, config file or env).
pub fn load_options(
    &self,
    global_options: &GlobalOptions,
) -> Result<GreptimeOptions<StandaloneOptions>> {
@@ -424,7 +439,8 @@ impl StartCommand {
    #[allow(unreachable_code)]
    #[allow(unused_variables)]
    #[allow(clippy::diverging_sub_expression)]
    async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
    /// Build GreptimeDB instance with the loaded options.
    pub async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
        common_runtime::init_global_runtimes(&opts.runtime);

        let guard = common_telemetry::init_global_logging(
@@ -439,15 +455,16 @@ impl StartCommand {
        info!("Standalone options: {opts:#?}");

        let mut plugins = Plugins::new();
        let plugin_opts = opts.plugins;
        let opts = opts.component;
        let fe_opts = opts.frontend_options();
        let dn_opts = opts.datanode_options();

        plugins::setup_frontend_plugins(&mut plugins, &fe_opts)
        plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &fe_opts)
            .await
            .context(StartFrontendSnafu)?;

        plugins::setup_datanode_plugins(&mut plugins, &dn_opts)
        plugins::setup_datanode_plugins(&mut plugins, &plugin_opts, &dn_opts)
            .await
            .context(StartDatanodeSnafu)?;

@@ -478,9 +495,18 @@ impl StartCommand {
                .build(),
        );

        let datanode = DatanodeBuilder::new(dn_opts, plugins.clone())
            .with_kv_backend(kv_backend.clone())
            .build()
            .await
            .context(StartDatanodeSnafu)?;

        let information_extension = Arc::new(StandaloneInformationExtension::new(
            datanode.region_server(),
            procedure_manager.clone(),
        ));
        let catalog_manager = KvBackendCatalogManager::new(
            dn_opts.mode,
            None,
            information_extension,
            kv_backend.clone(),
            layered_cache_registry.clone(),
            Some(procedure_manager.clone()),
@@ -489,12 +515,6 @@ impl StartCommand {
        let table_metadata_manager =
            Self::create_table_metadata_manager(kv_backend.clone()).await?;

        let datanode = DatanodeBuilder::new(dn_opts, plugins.clone())
            .with_kv_backend(kv_backend.clone())
            .build()
            .await
            .context(StartDatanodeSnafu)?;

        let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
        let flow_builder = FlownodeBuilder::new(
            Default::default(),
@@ -644,6 +664,93 @@ impl StartCommand {
    }
}

pub struct StandaloneInformationExtension {
    region_server: RegionServer,
    procedure_manager: ProcedureManagerRef,
    start_time_ms: u64,
}

impl StandaloneInformationExtension {
    pub fn new(region_server: RegionServer, procedure_manager: ProcedureManagerRef) -> Self {
        Self {
            region_server,
            procedure_manager,
            start_time_ms: common_time::util::current_time_millis() as u64,
        }
    }
}

#[async_trait::async_trait]
impl InformationExtension for StandaloneInformationExtension {
    type Error = catalog::error::Error;

    async fn nodes(&self) -> std::result::Result<Vec<NodeInfo>, Self::Error> {
        let build_info = common_version::build_info();
        let node_info = NodeInfo {
            // For the standalone:
            // - id always 0
            // - empty string for peer_addr
            peer: Peer {
                id: 0,
                addr: "".to_string(),
            },
            last_activity_ts: -1,
            status: NodeStatus::Standalone,
            version: build_info.version.to_string(),
            git_commit: build_info.commit_short.to_string(),
            // Use `self.start_time_ms` instead.
            // It's not precise but enough.
            start_time_ms: self.start_time_ms,
        };
        Ok(vec![node_info])
    }

    async fn procedures(&self) -> std::result::Result<Vec<(String, ProcedureInfo)>, Self::Error> {
        self.procedure_manager
            .list_procedures()
            .await
            .map_err(BoxedError::new)
            .map(|procedures| {
                procedures
                    .into_iter()
                    .map(|procedure| {
                        let status = procedure.state.as_str_name().to_string();
                        (status, procedure)
                    })
                    .collect::<Vec<_>>()
            })
            .context(catalog::error::ListProceduresSnafu)
    }

    async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error> {
        let stats = self
            .region_server
            .reportable_regions()
            .into_iter()
            .map(|stat| {
                let region_stat = self
                    .region_server
                    .region_statistic(stat.region_id)
                    .unwrap_or_default();
                RegionStat {
                    id: stat.region_id,
                    rcus: 0,
                    wcus: 0,
                    approximate_bytes: region_stat.estimated_disk_size(),
                    engine: stat.engine,
                    role: RegionRole::from(stat.role).into(),
                    num_rows: region_stat.num_rows,
                    memtable_size: region_stat.memtable_size,
                    manifest_size: region_stat.manifest_size,
                    sst_size: region_stat.sst_size,
                    index_size: region_stat.index_size,
                }
            })
            .collect::<Vec<_>>();
        Ok(stats)
    }
}

#[cfg(test)]
mod tests {
    use std::default::Default;
@@ -668,7 +775,7 @@ mod tests {
        };

        let mut plugins = Plugins::new();
        plugins::setup_frontend_plugins(&mut plugins, &fe_opts)
        plugins::setup_frontend_plugins(&mut plugins, &[], &fe_opts)
            .await
            .unwrap();
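The two hunks above are the heart of this change: DatanodeBuilder now runs before KvBackendCatalogManager::new, because the catalog manager is handed a StandaloneInformationExtension and that extension wraps the datanode's region server. A minimal sketch of the new construction order, with stand-in types (RegionServer, ProcedureManager and CatalogManager here are simplified placeholders, not the repo's definitions):

use std::sync::Arc;

struct RegionServer;
struct ProcedureManager;

struct InformationExtension {
    region_server: Arc<RegionServer>,
    procedure_manager: Arc<ProcedureManager>,
}

struct CatalogManager {
    info: Arc<InformationExtension>,
}

fn build() -> CatalogManager {
    // The datanode (and thus its region server) has to exist first...
    let region_server = Arc::new(RegionServer);
    let procedure_manager = Arc::new(ProcedureManager);
    // ...because the information extension captures it...
    let info = Arc::new(InformationExtension {
        region_server,
        procedure_manager,
    });
    // ...and only then can the catalog manager be constructed from it.
    CatalogManager { info }
}

fn main() {
    let _ = build();
}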
@@ -20,7 +20,7 @@ use common_config::Configurable;
use common_grpc::channel_manager::{
    DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
};
use common_telemetry::logging::{LoggingOptions, DEFAULT_OTLP_ENDPOINT};
use common_telemetry::logging::{LoggingOptions, SlowQueryOptions, DEFAULT_OTLP_ENDPOINT};
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::config::DatanodeWalConfig;
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
@@ -159,8 +159,20 @@ fn test_load_metasrv_example_config() {
            level: Some("info".to_string()),
            otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
            tracing_sample_ratio: Some(Default::default()),
            slow_query: SlowQueryOptions {
                enable: false,
                threshold: Some(Duration::from_secs(10)),
                sample_ratio: Some(1.0),
            },
            ..Default::default()
        },
        datanode: meta_srv::metasrv::DatanodeOptions {
            client: meta_srv::metasrv::DatanodeClientOptions {
                timeout: Duration::from_secs(10),
                connect_timeout: Duration::from_secs(10),
                tcp_nodelay: true,
            },
        },
        export_metrics: ExportMetricsOption {
            self_import: Some(Default::default()),
            remote_write: Some(Default::default()),
@@ -38,6 +38,18 @@ impl Plugins {
        self.read().get::<T>().cloned()
    }

    pub fn get_or_insert<T, F>(&self, f: F) -> T
    where
        T: 'static + Send + Sync + Clone,
        F: FnOnce() -> T,
    {
        let mut binding = self.write();
        if !binding.contains::<T>() {
            binding.insert(f());
        }
        binding.get::<T>().cloned().unwrap()
    }

    pub fn map_mut<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> R
    where
        F: FnOnce(Option<&mut T>) -> R,
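The new get_or_insert is a typed read-or-initialize helper: it takes the write lock, inserts the closure's result if no value of type T is cached yet, and returns a clone. A hypothetical call site, not compilable on its own since Plugins lives in this repo, and with QueryLimiter invented purely for illustration:

#[derive(Clone)]
struct QueryLimiter {
    max_concurrency: usize,
}

fn limiter(plugins: &Plugins) -> QueryLimiter {
    // Built at most once per type; later calls get the cached clone.
    plugins.get_or_insert(|| QueryLimiter { max_concurrency: 8 })
}

Note that the closure runs while the write lock is held, so it presumably must not call back into the same Plugins, or a non-reentrant lock would deadlock.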
@@ -46,8 +46,9 @@ impl From<String> for SecretString {
    }
}

/// Wrapper type for values that contains secrets, which attempts to limit
/// accidental exposure and ensure secrets are wiped from memory when dropped.
/// Wrapper type for values that contains secrets.
///
/// It attempts to limit accidental exposure and ensure secrets are wiped from memory when dropped.
/// (e.g. passwords, cryptographic keys, access tokens or other credentials)
///
/// Access to the secret inner value occurs through the [`ExposeSecret`]

@@ -103,14 +103,15 @@ pub const INFORMATION_SCHEMA_PROCEDURE_INFO_TABLE_ID: u32 = 34;
/// id for information_schema.region_statistics
pub const INFORMATION_SCHEMA_REGION_STATISTICS_TABLE_ID: u32 = 35;

/// ----- End of information_schema tables -----
// ----- End of information_schema tables -----

/// ----- Begin of pg_catalog tables -----
pub const PG_CATALOG_PG_CLASS_TABLE_ID: u32 = 256;
pub const PG_CATALOG_PG_TYPE_TABLE_ID: u32 = 257;
pub const PG_CATALOG_PG_NAMESPACE_TABLE_ID: u32 = 258;

/// ----- End of pg_catalog tables -----
// ----- End of pg_catalog tables -----

pub const MITO_ENGINE: &str = "mito";
pub const MITO2_ENGINE: &str = "mito2";
pub const METRIC_ENGINE: &str = "metric";

@@ -9,7 +9,7 @@ workspace = true

[features]
default = ["geo"]
geo = ["geohash", "h3o"]
geo = ["geohash", "h3o", "s2"]

[dependencies]
api.workspace = true
@@ -35,6 +35,7 @@ num = "0.4"
num-traits = "0.2"
once_cell.workspace = true
paste = "1.0"
s2 = { version = "0.0.12", optional = true }
serde.workspace = true
serde_json.workspace = true
session.workspace = true

@@ -91,5 +91,12 @@ impl AggregateFunctions {
        register_aggr_func!("argmin", 1, ArgminAccumulatorCreator);
        register_aggr_func!("scipystatsnormcdf", 2, ScipyStatsNormCdfAccumulatorCreator);
        register_aggr_func!("scipystatsnormpdf", 2, ScipyStatsNormPdfAccumulatorCreator);

        #[cfg(feature = "geo")]
        register_aggr_func!(
            "json_encode_path",
            3,
            super::geo::encoding::JsonPathEncodeFunctionCreator
        );
    }
}
@@ -16,7 +16,10 @@ use std::cmp::Ordering;
use std::sync::Arc;

use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{BadAccumulatorImplSnafu, CreateAccumulatorSnafu, Result};
use common_query::error::{
    BadAccumulatorImplSnafu, CreateAccumulatorSnafu, InvalidInputStateSnafu, Result,
};
use common_query::logical_plan::accumulator::AggrFuncTypeStore;
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use datatypes::prelude::*;

@@ -16,7 +16,10 @@ use std::cmp::Ordering;
use std::sync::Arc;

use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{BadAccumulatorImplSnafu, CreateAccumulatorSnafu, Result};
use common_query::error::{
    BadAccumulatorImplSnafu, CreateAccumulatorSnafu, InvalidInputStateSnafu, Result,
};
use common_query::logical_plan::accumulator::AggrFuncTypeStore;
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use datatypes::prelude::*;

@@ -17,8 +17,10 @@ use std::sync::Arc;

use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
    CreateAccumulatorSnafu, DowncastVectorSnafu, FromScalarValueSnafu, Result,
    CreateAccumulatorSnafu, DowncastVectorSnafu, FromScalarValueSnafu, InvalidInputStateSnafu,
    Result,
};
use common_query::logical_plan::accumulator::AggrFuncTypeStore;
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use datatypes::prelude::*;

@@ -17,8 +17,10 @@ use std::sync::Arc;

use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
    BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu, Result,
    BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu, InvalidInputStateSnafu,
    Result,
};
use common_query::logical_plan::accumulator::AggrFuncTypeStore;
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use datatypes::prelude::*;

@@ -18,8 +18,9 @@ use std::sync::Arc;
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
    self, BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu,
    FromScalarValueSnafu, InvalidInputColSnafu, Result,
    FromScalarValueSnafu, InvalidInputColSnafu, InvalidInputStateSnafu, Result,
};
use common_query::logical_plan::accumulator::AggrFuncTypeStore;
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use datatypes::prelude::*;

@@ -17,8 +17,10 @@ use std::sync::Arc;
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
    self, BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu,
    FromScalarValueSnafu, GenerateFunctionSnafu, InvalidInputColSnafu, Result,
    FromScalarValueSnafu, GenerateFunctionSnafu, InvalidInputColSnafu, InvalidInputStateSnafu,
    Result,
};
use common_query::logical_plan::accumulator::AggrFuncTypeStore;
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use datatypes::prelude::*;

@@ -17,8 +17,10 @@ use std::sync::Arc;
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
    self, BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu,
    FromScalarValueSnafu, GenerateFunctionSnafu, InvalidInputColSnafu, Result,
    FromScalarValueSnafu, GenerateFunctionSnafu, InvalidInputColSnafu, InvalidInputStateSnafu,
    Result,
};
use common_query::logical_plan::accumulator::AggrFuncTypeStore;
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use datatypes::prelude::*;
@@ -14,18 +14,19 @@

use std::fmt;

use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::error::{ArrowComputeSnafu, IntoVectorSnafu, InvalidFuncArgsSnafu, Result};
use common_query::prelude::Signature;
use datatypes::data_type::DataType;
use datatypes::arrow::compute::kernels::numeric;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::ValueRef;
use datatypes::vectors::VectorRef;
use snafu::ensure;
use datatypes::vectors::{Helper, VectorRef};
use snafu::{ensure, ResultExt};

use crate::function::{Function, FunctionContext};
use crate::helper;

/// A function adds an interval value to Timestamp, Date or DateTime, and return the result.
/// A function that adds an interval value to a Timestamp or Date value and returns the result.
/// The implementation of the datetime type is based on Date64, which is incorrect, so this
/// function doesn't support the datetime type.
#[derive(Clone, Debug, Default)]
pub struct DateAddFunction;

@@ -44,7 +45,6 @@ impl Function for DateAddFunction {
        helper::one_of_sigs2(
            vec![
                ConcreteDataType::date_datatype(),
                ConcreteDataType::datetime_datatype(),
                ConcreteDataType::timestamp_second_datatype(),
                ConcreteDataType::timestamp_millisecond_datatype(),
                ConcreteDataType::timestamp_microsecond_datatype(),
@@ -69,64 +69,14 @@ impl Function for DateAddFunction {
            }
        );

        let left = &columns[0];
        let right = &columns[1];
        let left = columns[0].to_arrow_array();
        let right = columns[1].to_arrow_array();

        let size = left.len();
        let left_datatype = columns[0].data_type();
        match left_datatype {
            ConcreteDataType::Timestamp(_) => {
                let mut result = left_datatype.create_mutable_vector(size);
                for i in 0..size {
                    let ts = left.get(i).as_timestamp();
                    let interval = right.get(i).as_interval();

                    let new_ts = match (ts, interval) {
                        (Some(ts), Some(interval)) => ts.add_interval(interval),
                        _ => ts,
                    };

                    result.push_value_ref(ValueRef::from(new_ts));
                }

                Ok(result.to_vector())
            }
            ConcreteDataType::Date(_) => {
                let mut result = left_datatype.create_mutable_vector(size);
                for i in 0..size {
                    let date = left.get(i).as_date();
                    let interval = right.get(i).as_interval();
                    let new_date = match (date, interval) {
                        (Some(date), Some(interval)) => date.add_interval(interval),
                        _ => date,
                    };

                    result.push_value_ref(ValueRef::from(new_date));
                }

                Ok(result.to_vector())
            }
            ConcreteDataType::DateTime(_) => {
                let mut result = left_datatype.create_mutable_vector(size);
                for i in 0..size {
                    let datetime = left.get(i).as_datetime();
                    let interval = right.get(i).as_interval();
                    let new_datetime = match (datetime, interval) {
                        (Some(datetime), Some(interval)) => datetime.add_interval(interval),
                        _ => datetime,
                    };

                    result.push_value_ref(ValueRef::from(new_datetime));
                }

                Ok(result.to_vector())
            }
            _ => UnsupportedInputDataTypeSnafu {
                function: NAME,
                datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
            }
            .fail(),
        }
        let result = numeric::add(&left, &right).context(ArrowComputeSnafu)?;
        let arrow_type = result.data_type().clone();
        Helper::try_into_vector(result).context(IntoVectorSnafu {
            data_type: arrow_type,
        })
    }
}

@@ -144,8 +94,7 @@ mod tests {
    use datatypes::prelude::ConcreteDataType;
    use datatypes::value::Value;
    use datatypes::vectors::{
        DateTimeVector, DateVector, IntervalDayTimeVector, IntervalYearMonthVector,
        TimestampSecondVector,
        DateVector, IntervalDayTimeVector, IntervalYearMonthVector, TimestampSecondVector,
    };

    use super::{DateAddFunction, *};
@@ -168,16 +117,15 @@ mod tests {
            ConcreteDataType::date_datatype(),
            f.return_type(&[ConcreteDataType::date_datatype()]).unwrap()
        );
        assert_eq!(
            ConcreteDataType::datetime_datatype(),
            f.return_type(&[ConcreteDataType::datetime_datatype()])
                .unwrap()
        );
        assert!(matches!(f.signature(),
        assert!(
            matches!(f.signature(),
                Signature {
                    type_signature: TypeSignature::OneOf(sigs),
                    volatility: Volatility::Immutable
                } if sigs.len() == 18));
                } if sigs.len() == 15),
            "{:?}",
            f.signature()
        );
    }

    #[test]
@@ -243,36 +191,4 @@ mod tests {
            }
        }
    }

    #[test]
    fn test_datetime_date_add() {
        let f = DateAddFunction;

        let dates = vec![Some(123), None, Some(42), None];
        // Intervals in months
        let intervals = vec![1, 2, 3, 1];
        let results = [Some(2678400123), None, Some(7776000042), None];

        let date_vector = DateTimeVector::from(dates.clone());
        let interval_vector = IntervalYearMonthVector::from_vec(intervals);
        let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
        let vector = f.eval(FunctionContext::default(), &args).unwrap();

        assert_eq!(4, vector.len());
        for (i, _t) in dates.iter().enumerate() {
            let v = vector.get(i);
            let result = results.get(i).unwrap();

            if result.is_none() {
                assert_eq!(Value::Null, v);
                continue;
            }
            match v {
                Value::DateTime(date) => {
                    assert_eq!(date.val(), result.unwrap());
                }
                _ => unreachable!(),
            }
        }
    }
}
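Both date_add and (below) date_sub now delegate the per-row arithmetic to arrow's numeric kernels instead of matching on the concrete type and looping manually. A standalone sketch of the kernel call the new eval boils down to, assuming a recent arrow release where timestamp plus year-month interval is a supported combination:

use std::sync::Arc;

use arrow::array::{ArrayRef, IntervalYearMonthArray, TimestampSecondArray};
use arrow::compute::kernels::numeric;

fn main() -> Result<(), arrow::error::ArrowError> {
    // Two rows: the epoch and a null; nulls propagate through the kernel.
    let ts: ArrayRef = Arc::new(TimestampSecondArray::from(vec![Some(0), None]));
    // Year-month intervals, in months.
    let months: ArrayRef = Arc::new(IntervalYearMonthArray::from(vec![1, 2]));
    // One vectorized call replaces the old row-by-row match loop.
    let sum = numeric::add(&ts, &months)?;
    println!("{sum:?}");
    Ok(())
}

This is also why the datetime signatures were dropped: per the doc comment above, DateTime maps to Date64, whose interval arithmetic was incorrect, so the type is no longer accepted.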
@@ -14,18 +14,19 @@

use std::fmt;

use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::error::{ArrowComputeSnafu, IntoVectorSnafu, InvalidFuncArgsSnafu, Result};
use common_query::prelude::Signature;
use datatypes::data_type::DataType;
use datatypes::arrow::compute::kernels::numeric;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::ValueRef;
use datatypes::vectors::VectorRef;
use snafu::ensure;
use datatypes::vectors::{Helper, VectorRef};
use snafu::{ensure, ResultExt};

use crate::function::{Function, FunctionContext};
use crate::helper;

/// A function subtracts an interval value to Timestamp, Date or DateTime, and return the result.
/// A function that subtracts an interval value from a Timestamp or Date value and returns the result.
/// The implementation of the datetime type is based on Date64, which is incorrect, so this
/// function doesn't support the datetime type.
#[derive(Clone, Debug, Default)]
pub struct DateSubFunction;

@@ -44,7 +45,6 @@ impl Function for DateSubFunction {
        helper::one_of_sigs2(
            vec![
                ConcreteDataType::date_datatype(),
                ConcreteDataType::datetime_datatype(),
                ConcreteDataType::timestamp_second_datatype(),
                ConcreteDataType::timestamp_millisecond_datatype(),
                ConcreteDataType::timestamp_microsecond_datatype(),
@@ -69,65 +69,14 @@ impl Function for DateSubFunction {
            }
        );

        let left = &columns[0];
        let right = &columns[1];
        let left = columns[0].to_arrow_array();
        let right = columns[1].to_arrow_array();

        let size = left.len();
        let left_datatype = columns[0].data_type();

        match left_datatype {
            ConcreteDataType::Timestamp(_) => {
                let mut result = left_datatype.create_mutable_vector(size);
                for i in 0..size {
                    let ts = left.get(i).as_timestamp();
                    let interval = right.get(i).as_interval();

                    let new_ts = match (ts, interval) {
                        (Some(ts), Some(interval)) => ts.sub_interval(interval),
                        _ => ts,
                    };

                    result.push_value_ref(ValueRef::from(new_ts));
                }

                Ok(result.to_vector())
            }
            ConcreteDataType::Date(_) => {
                let mut result = left_datatype.create_mutable_vector(size);
                for i in 0..size {
                    let date = left.get(i).as_date();
                    let interval = right.get(i).as_interval();
                    let new_date = match (date, interval) {
                        (Some(date), Some(interval)) => date.sub_interval(interval),
                        _ => date,
                    };

                    result.push_value_ref(ValueRef::from(new_date));
                }

                Ok(result.to_vector())
            }
            ConcreteDataType::DateTime(_) => {
                let mut result = left_datatype.create_mutable_vector(size);
                for i in 0..size {
                    let datetime = left.get(i).as_datetime();
                    let interval = right.get(i).as_interval();
                    let new_datetime = match (datetime, interval) {
                        (Some(datetime), Some(interval)) => datetime.sub_interval(interval),
                        _ => datetime,
                    };

                    result.push_value_ref(ValueRef::from(new_datetime));
                }

                Ok(result.to_vector())
            }
            _ => UnsupportedInputDataTypeSnafu {
                function: NAME,
                datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
            }
            .fail(),
        }
        let result = numeric::sub(&left, &right).context(ArrowComputeSnafu)?;
        let arrow_type = result.data_type().clone();
        Helper::try_into_vector(result).context(IntoVectorSnafu {
            data_type: arrow_type,
        })
    }
}

@@ -145,8 +94,7 @@ mod tests {
    use datatypes::prelude::ConcreteDataType;
    use datatypes::value::Value;
    use datatypes::vectors::{
        DateTimeVector, DateVector, IntervalDayTimeVector, IntervalYearMonthVector,
        TimestampSecondVector,
        DateVector, IntervalDayTimeVector, IntervalYearMonthVector, TimestampSecondVector,
    };

    use super::{DateSubFunction, *};
@@ -174,11 +122,15 @@ mod tests {
            f.return_type(&[ConcreteDataType::datetime_datatype()])
                .unwrap()
        );
        assert!(matches!(f.signature(),
        assert!(
            matches!(f.signature(),
                Signature {
                    type_signature: TypeSignature::OneOf(sigs),
                    volatility: Volatility::Immutable
                } if sigs.len() == 18));
                } if sigs.len() == 15),
            "{:?}",
            f.signature()
        );
    }

    #[test]
@@ -250,42 +202,4 @@ mod tests {
            }
        }
    }

    #[test]
    fn test_datetime_date_sub() {
        let f = DateSubFunction;
        let millis_per_month = 3600 * 24 * 30 * 1000;

        let dates = vec![
            Some(123 * millis_per_month),
            None,
            Some(42 * millis_per_month),
            None,
        ];
        // Intervals in months
        let intervals = vec![1, 2, 3, 1];
        let results = [Some(316137600000), None, Some(100915200000), None];

        let date_vector = DateTimeVector::from(dates.clone());
        let interval_vector = IntervalYearMonthVector::from_vec(intervals);
        let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
        let vector = f.eval(FunctionContext::default(), &args).unwrap();

        assert_eq!(4, vector.len());
        for (i, _t) in dates.iter().enumerate() {
            let v = vector.get(i);
            let result = results.get(i).unwrap();

            if result.is_none() {
                assert_eq!(Value::Null, v);
                continue;
            }
            match v {
                Value::DateTime(date) => {
                    assert_eq!(date.val(), result.unwrap());
                }
                _ => unreachable!(),
            }
        }
    }
}
@@ -13,10 +13,11 @@
// limitations under the License.

use std::sync::Arc;
pub(crate) mod encoding;
mod geohash;
mod h3;

use geohash::{GeohashFunction, GeohashNeighboursFunction};
mod helpers;
mod s2;

use crate::function_registry::FunctionRegistry;

@@ -25,20 +26,39 @@ pub(crate) struct GeoFunctions;
impl GeoFunctions {
    pub fn register(registry: &FunctionRegistry) {
        // geohash
        registry.register(Arc::new(GeohashFunction));
        registry.register(Arc::new(GeohashNeighboursFunction));
        // h3 family
        registry.register(Arc::new(geohash::GeohashFunction));
        registry.register(Arc::new(geohash::GeohashNeighboursFunction));

        // h3 index
        registry.register(Arc::new(h3::H3LatLngToCell));
        registry.register(Arc::new(h3::H3LatLngToCellString));

        // h3 index inspection
        registry.register(Arc::new(h3::H3CellBase));
        registry.register(Arc::new(h3::H3CellCenterChild));
        registry.register(Arc::new(h3::H3CellCenterLat));
        registry.register(Arc::new(h3::H3CellCenterLng));
        registry.register(Arc::new(h3::H3CellIsPentagon));
        registry.register(Arc::new(h3::H3CellParent));
        registry.register(Arc::new(h3::H3CellResolution));
        registry.register(Arc::new(h3::H3CellToString));
        registry.register(Arc::new(h3::H3IsNeighbour));
        registry.register(Arc::new(h3::H3StringToCell));
        registry.register(Arc::new(h3::H3CellToString));
        registry.register(Arc::new(h3::H3CellCenterLatLng));
        registry.register(Arc::new(h3::H3CellResolution));

        // h3 hierarchical grid
        registry.register(Arc::new(h3::H3CellCenterChild));
        registry.register(Arc::new(h3::H3CellParent));
        registry.register(Arc::new(h3::H3CellToChildren));
        registry.register(Arc::new(h3::H3CellToChildrenSize));
        registry.register(Arc::new(h3::H3CellToChildPos));
        registry.register(Arc::new(h3::H3ChildPosToCell));

        // h3 grid traversal
        registry.register(Arc::new(h3::H3GridDisk));
        registry.register(Arc::new(h3::H3GridDiskDistances));
        registry.register(Arc::new(h3::H3GridDistance));
        registry.register(Arc::new(h3::H3GridPathCells));

        // s2
        registry.register(Arc::new(s2::S2LatLngToCell));
        registry.register(Arc::new(s2::S2CellLevel));
        registry.register(Arc::new(s2::S2CellToToken));
        registry.register(Arc::new(s2::S2CellParent));
    }
}
src/common/function/src/scalars/geo/encoding.rs (new file, 223 lines)
@@ -0,0 +1,223 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_error::ext::{BoxedError, PlainError};
use common_error::status_code::StatusCode;
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{self, InvalidInputStateSnafu, Result};
use common_query::logical_plan::accumulator::AggrFuncTypeStore;
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::AccumulatorCreatorFunction;
use common_time::Timestamp;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::{ListValue, Value};
use datatypes::vectors::VectorRef;
use snafu::{ensure, ResultExt};

use super::helpers::{ensure_columns_len, ensure_columns_n};

/// Accumulator of lat, lng, timestamp tuples
#[derive(Debug)]
pub struct JsonPathAccumulator {
    timestamp_type: ConcreteDataType,
    lat: Vec<Option<f64>>,
    lng: Vec<Option<f64>>,
    timestamp: Vec<Option<Timestamp>>,
}

impl JsonPathAccumulator {
    fn new(timestamp_type: ConcreteDataType) -> Self {
        Self {
            lat: Vec::default(),
            lng: Vec::default(),
            timestamp: Vec::default(),
            timestamp_type,
        }
    }
}

impl Accumulator for JsonPathAccumulator {
    fn state(&self) -> Result<Vec<Value>> {
        Ok(vec![
            Value::List(ListValue::new(
                self.lat.iter().map(|i| Value::from(*i)).collect(),
                ConcreteDataType::float64_datatype(),
            )),
            Value::List(ListValue::new(
                self.lng.iter().map(|i| Value::from(*i)).collect(),
                ConcreteDataType::float64_datatype(),
            )),
            Value::List(ListValue::new(
                self.timestamp.iter().map(|i| Value::from(*i)).collect(),
                self.timestamp_type.clone(),
            )),
        ])
    }

    fn update_batch(&mut self, columns: &[VectorRef]) -> Result<()> {
        // `update_batch`, as in datafusion, just provides the accumulator the
        // original input.
        //
        // `columns` is a vec of [`lat`, `lng`, `timestamp`]
        // where
        // - `lat` is a vector of `Value::Float64` or a similar type. Each item
        //   in the vector is a row in the given dataset.
        // - so on and so forth for `lng` and `timestamp`
        ensure_columns_n!(columns, 3);

        let lat = &columns[0];
        let lng = &columns[1];
        let ts = &columns[2];

        let size = lat.len();

        for idx in 0..size {
            self.lat.push(lat.get(idx).as_f64_lossy());
            self.lng.push(lng.get(idx).as_f64_lossy());
            self.timestamp.push(ts.get(idx).as_timestamp());
        }

        Ok(())
    }

    fn merge_batch(&mut self, states: &[VectorRef]) -> Result<()> {
        // `merge_batch`, as in datafusion, provides the state accumulated from
        // the data returned by child accumulators' `state()` calls.
        // In our particular implementation, the data structure is like this:
        //
        // `states` is a vec of [`lat`, `lng`, `timestamp`]
        // where
        // - `lat` is a vector of `Value::List`. Each item in the list is all
        //   coordinates from a child accumulator.
        // - so on and so forth for `lng` and `timestamp`

        ensure_columns_n!(states, 3);

        let lat_lists = &states[0];
        let lng_lists = &states[1];
        let ts_lists = &states[2];

        let len = lat_lists.len();

        for idx in 0..len {
            if let Some(lat_list) = lat_lists
                .get(idx)
                .as_list()
                .map_err(BoxedError::new)
                .context(error::ExecuteSnafu)?
            {
                for v in lat_list.items() {
                    self.lat.push(v.as_f64_lossy());
                }
            }

            if let Some(lng_list) = lng_lists
                .get(idx)
                .as_list()
                .map_err(BoxedError::new)
                .context(error::ExecuteSnafu)?
            {
                for v in lng_list.items() {
                    self.lng.push(v.as_f64_lossy());
                }
            }

            if let Some(ts_list) = ts_lists
                .get(idx)
                .as_list()
                .map_err(BoxedError::new)
                .context(error::ExecuteSnafu)?
            {
                for v in ts_list.items() {
                    self.timestamp.push(v.as_timestamp());
                }
            }
        }

        Ok(())
    }

    fn evaluate(&self) -> Result<Value> {
        let mut work_vec: Vec<(&Option<f64>, &Option<f64>, &Option<Timestamp>)> = self
            .lat
            .iter()
            .zip(self.lng.iter())
            .zip(self.timestamp.iter())
            .map(|((a, b), c)| (a, b, c))
            .collect();

        // sort by timestamp; we treat a null timestamp as 0
        work_vec.sort_unstable_by_key(|tuple| tuple.2.unwrap_or_else(|| Timestamp::new_second(0)));

        let result = serde_json::to_string(
            &work_vec
                .into_iter()
                // note that we transform to lng,lat for geojson compatibility
                .map(|(lat, lng, _)| vec![lng, lat])
                .collect::<Vec<Vec<&Option<f64>>>>(),
        )
        .map_err(|e| {
            BoxedError::new(PlainError::new(
                format!("Serialization failure: {}", e),
                StatusCode::EngineExecuteQuery,
            ))
        })
        .context(error::ExecuteSnafu)?;

        Ok(Value::String(result.into()))
    }
}

/// This function accepts rows of lat, lng and timestamp, sorts them by timestamp and
/// encodes them into a geojson-like path.
///
/// Example:
///
/// ```sql
/// SELECT json_encode_path(lat, lon, timestamp) FROM table [group by ...];
/// ```
///
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
pub struct JsonPathEncodeFunctionCreator {}

impl AggregateFunctionCreator for JsonPathEncodeFunctionCreator {
    fn creator(&self) -> AccumulatorCreatorFunction {
        let creator: AccumulatorCreatorFunction = Arc::new(move |types: &[ConcreteDataType]| {
            let ts_type = types[2].clone();
            Ok(Box::new(JsonPathAccumulator::new(ts_type)))
        });

        creator
    }

    fn output_type(&self) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::string_datatype())
    }

    fn state_types(&self) -> Result<Vec<ConcreteDataType>> {
        let input_types = self.input_types()?;
        ensure!(input_types.len() == 3, InvalidInputStateSnafu);

        let timestamp_type = input_types[2].clone();

        Ok(vec![
            ConcreteDataType::list_datatype(ConcreteDataType::float64_datatype()),
            ConcreteDataType::list_datatype(ConcreteDataType::float64_datatype()),
            ConcreteDataType::list_datatype(timestamp_type),
        ])
    }
}
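The evaluate step above is where the output shape is decided: points are reordered by timestamp (nulls sorting as epoch zero) and coordinates are emitted longitude-first for GeoJSON compatibility. A standalone sketch of that encode step with plain types in place of the accumulator's internal vectors, runnable with serde_json alone:

// (lat, lng, ts) triples stand in for the accumulator's three column vectors.
fn encode_path(mut points: Vec<(f64, f64, i64)>) -> serde_json::Result<String> {
    // Sort by timestamp, as `evaluate` does before encoding.
    points.sort_unstable_by_key(|&(_, _, ts)| ts);
    // Emit lng before lat, matching the geojson-like coordinate order.
    let coords: Vec<[f64; 2]> = points.into_iter().map(|(lat, lng, _)| [lng, lat]).collect();
    serde_json::to_string(&coords)
}

fn main() {
    let json = encode_path(vec![(1.0, 2.0, 20), (3.0, 4.0, 10)]).unwrap();
    assert_eq!(json, "[[4.0,3.0],[2.0,1.0]]");
}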
@@ -16,22 +16,75 @@ use std::str::FromStr;

use common_error::ext::{BoxedError, PlainError};
use common_error::status_code::StatusCode;
use common_query::error::{self, InvalidFuncArgsSnafu, Result};
use common_query::error::{self, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::value::Value;
use datatypes::scalars::{Scalar, ScalarVectorBuilder};
use datatypes::value::{ListValue, Value};
use datatypes::vectors::{
    BooleanVectorBuilder, Float64VectorBuilder, MutableVector, StringVectorBuilder,
    UInt64VectorBuilder, UInt8VectorBuilder, VectorRef,
    BooleanVectorBuilder, Int32VectorBuilder, ListVectorBuilder, MutableVector,
    StringVectorBuilder, UInt64VectorBuilder, UInt8VectorBuilder, VectorRef,
};
use derive_more::Display;
use h3o::{CellIndex, LatLng, Resolution};
use snafu::{ensure, ResultExt};
use once_cell::sync::Lazy;
use snafu::ResultExt;

use super::helpers::{ensure_and_coerce, ensure_columns_len, ensure_columns_n};
use crate::function::{Function, FunctionContext};

static CELL_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
    vec![
        ConcreteDataType::int64_datatype(),
        ConcreteDataType::uint64_datatype(),
    ]
});

static COORDINATE_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
    vec![
        ConcreteDataType::float32_datatype(),
        ConcreteDataType::float64_datatype(),
    ]
});

static RESOLUTION_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
    vec![
        ConcreteDataType::int8_datatype(),
        ConcreteDataType::int16_datatype(),
        ConcreteDataType::int32_datatype(),
        ConcreteDataType::int64_datatype(),
        ConcreteDataType::uint8_datatype(),
        ConcreteDataType::uint16_datatype(),
        ConcreteDataType::uint32_datatype(),
        ConcreteDataType::uint64_datatype(),
    ]
});

static DISTANCE_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
    vec![
        ConcreteDataType::int8_datatype(),
        ConcreteDataType::int16_datatype(),
        ConcreteDataType::int32_datatype(),
        ConcreteDataType::int64_datatype(),
        ConcreteDataType::uint8_datatype(),
        ConcreteDataType::uint16_datatype(),
        ConcreteDataType::uint32_datatype(),
        ConcreteDataType::uint64_datatype(),
    ]
});

static POSITION_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
    vec![
        ConcreteDataType::int8_datatype(),
        ConcreteDataType::int16_datatype(),
        ConcreteDataType::int32_datatype(),
        ConcreteDataType::int64_datatype(),
        ConcreteDataType::uint8_datatype(),
        ConcreteDataType::uint16_datatype(),
        ConcreteDataType::uint32_datatype(),
        ConcreteDataType::uint64_datatype(),
    ]
});

/// Function that returns [h3] encoding cellid for a given geospatial coordinate.
///
/// [h3]: https://h3geo.org/
@@ -50,20 +103,8 @@ impl Function for H3LatLngToCell {

    fn signature(&self) -> Signature {
        let mut signatures = Vec::new();
        for coord_type in &[
            ConcreteDataType::float32_datatype(),
            ConcreteDataType::float64_datatype(),
        ] {
            for resolution_type in &[
                ConcreteDataType::int8_datatype(),
                ConcreteDataType::int16_datatype(),
                ConcreteDataType::int32_datatype(),
                ConcreteDataType::int64_datatype(),
                ConcreteDataType::uint8_datatype(),
                ConcreteDataType::uint16_datatype(),
                ConcreteDataType::uint32_datatype(),
                ConcreteDataType::uint64_datatype(),
            ] {
        for coord_type in COORDINATE_TYPES.as_slice() {
            for resolution_type in RESOLUTION_TYPES.as_slice() {
                signatures.push(TypeSignature::Exact(vec![
                    // latitude
                    coord_type.clone(),
@@ -78,15 +119,7 @@ impl Function for H3LatLngToCell {
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 3,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 3, provided : {}",
                    columns.len()
                ),
            }
        );
        ensure_columns_n!(columns, 3);

        let lat_vec = &columns[0];
        let lon_vec = &columns[1];
@@ -142,20 +175,8 @@ impl Function for H3LatLngToCellString {

    fn signature(&self) -> Signature {
        let mut signatures = Vec::new();
        for coord_type in &[
            ConcreteDataType::float32_datatype(),
            ConcreteDataType::float64_datatype(),
        ] {
            for resolution_type in &[
                ConcreteDataType::int8_datatype(),
                ConcreteDataType::int16_datatype(),
                ConcreteDataType::int32_datatype(),
                ConcreteDataType::int64_datatype(),
                ConcreteDataType::uint8_datatype(),
                ConcreteDataType::uint16_datatype(),
                ConcreteDataType::uint32_datatype(),
                ConcreteDataType::uint64_datatype(),
            ] {
        for coord_type in COORDINATE_TYPES.as_slice() {
            for resolution_type in RESOLUTION_TYPES.as_slice() {
                signatures.push(TypeSignature::Exact(vec![
                    // latitude
                    coord_type.clone(),
@@ -170,15 +191,7 @@ impl Function for H3LatLngToCellString {
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 3,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 3, provided : {}",
                    columns.len()
                ),
            }
        );
        ensure_columns_n!(columns, 3);

        let lat_vec = &columns[0];
        let lon_vec = &columns[1];
@@ -234,15 +247,7 @@ impl Function for H3CellToString {
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 1, provided : {}",
                    columns.len()
                ),
            }
        );
        ensure_columns_n!(columns, 1);

        let cell_vec = &columns[0];
        let size = cell_vec.len();
@@ -280,15 +285,7 @@ impl Function for H3StringToCell {
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 1, provided : {}",
                    columns.len()
                ),
            }
        );
        ensure_columns_n!(columns, 1);

        let string_vec = &columns[0];
        let size = string_vec.len();
@@ -319,18 +316,20 @@ impl Function for H3StringToCell {
    }
}

/// Function that returns centroid latitude of given cell id
/// Function that returns centroid latitude and longitude of given cell id
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct H3CellCenterLat;
pub struct H3CellCenterLatLng;

impl Function for H3CellCenterLat {
impl Function for H3CellCenterLatLng {
    fn name(&self) -> &str {
        "h3_cell_center_lat"
        "h3_cell_center_latlng"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::float64_datatype())
        Ok(ConcreteDataType::list_datatype(
            ConcreteDataType::float64_datatype(),
        ))
    }

    fn signature(&self) -> Signature {
@@ -338,69 +337,26 @@ impl Function for H3CellCenterLat {
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 1, provided : {}",
                    columns.len()
                ),
            }
        );
        ensure_columns_n!(columns, 1);

        let cell_vec = &columns[0];
        let size = cell_vec.len();
        let mut results = Float64VectorBuilder::with_capacity(size);
        let mut results =
            ListVectorBuilder::with_type_capacity(ConcreteDataType::float64_datatype(), size);

        for i in 0..size {
            let cell = cell_from_value(cell_vec.get(i))?;
            let lat = cell.map(|cell| LatLng::from(cell).lat());
            let latlng = cell.map(LatLng::from);

            results.push(lat);
        }

        Ok(results.to_vector())
    }
}

/// Function that returns centroid longitude of given cell id
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct H3CellCenterLng;

impl Function for H3CellCenterLng {
    fn name(&self) -> &str {
        "h3_cell_center_lng"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::float64_datatype())
    }

    fn signature(&self) -> Signature {
        signature_of_cell()
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 1, provided : {}",
                    columns.len()
                ),
            if let Some(latlng) = latlng {
                let result = ListValue::new(
                    vec![latlng.lat().into(), latlng.lng().into()],
                    ConcreteDataType::float64_datatype(),
                );
                results.push(Some(result.as_scalar_ref()));
            } else {
                results.push(None);
            }
        );

        let cell_vec = &columns[0];
        let size = cell_vec.len();
        let mut results = Float64VectorBuilder::with_capacity(size);

        for i in 0..size {
            let cell = cell_from_value(cell_vec.get(i))?;
            let lat = cell.map(|cell| LatLng::from(cell).lng());

            results.push(lat);
        }

        Ok(results.to_vector())
@@ -426,15 +382,7 @@ impl Function for H3CellResolution {
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 1, provided : {}",
                    columns.len()
                ),
            }
        );
        ensure_columns_n!(columns, 1);

        let cell_vec = &columns[0];
        let size = cell_vec.len();
@@ -470,15 +418,7 @@ impl Function for H3CellBase {
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 1, provided : {}",
                    columns.len()
                ),
            }
        );
        ensure_columns_n!(columns, 1);

        let cell_vec = &columns[0];
        let size = cell_vec.len();
@@ -514,15 +454,7 @@ impl Function for H3CellIsPentagon {
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 1, provided : {}",
                    columns.len()
                ),
            }
        );
        ensure_columns_n!(columns, 1);

        let cell_vec = &columns[0];
        let size = cell_vec.len();
@@ -558,15 +490,7 @@ impl Function for H3CellCenterChild {
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 2,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 2, provided : {}",
                    columns.len()
                ),
            }
        );
        ensure_columns_n!(columns, 2);

        let cell_vec = &columns[0];
        let res_vec = &columns[1];
@@ -606,15 +530,7 @@ impl Function for H3CellParent {
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 2,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 2, provided : {}",
                    columns.len()
                ),
            }
        );
        ensure_columns_n!(columns, 2);

        let cell_vec = &columns[0];
        let res_vec = &columns[1];
@@ -633,48 +549,323 @@ impl Function for H3CellParent {
    }
}

/// Function that checks if two cells are neighbour
/// Function that returns children cell list
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct H3IsNeighbour;
pub struct H3CellToChildren;

impl Function for H3IsNeighbour {
impl Function for H3CellToChildren {
    fn name(&self) -> &str {
        "h3_is_neighbour"
        "h3_cell_to_children"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::boolean_datatype())
        Ok(ConcreteDataType::list_datatype(
            ConcreteDataType::uint64_datatype(),
        ))
    }

    fn signature(&self) -> Signature {
        signature_of_double_cell()
        signature_of_cell_and_resolution()
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 2,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect 2, provided : {}",
                    columns.len()
                ),
            }
        );
        ensure_columns_n!(columns, 2);

        let cell_vec = &columns[0];
        let cell2_vec = &columns[1];
        let res_vec = &columns[1];
        let size = cell_vec.len();
        let mut results = BooleanVectorBuilder::with_capacity(size);
        let mut results =
            ListVectorBuilder::with_type_capacity(ConcreteDataType::uint64_datatype(), size);

        for i in 0..size {
            let cell = cell_from_value(cell_vec.get(i))?;
            let res = value_to_resolution(res_vec.get(i))?;
            let result = cell.map(|cell| {
                let children: Vec<Value> = cell
                    .children(res)
                    .map(|child| Value::from(u64::from(child)))
                    .collect();
                ListValue::new(children, ConcreteDataType::uint64_datatype())
            });

            if let Some(list_value) = result {
                results.push(Some(list_value.as_scalar_ref()));
            } else {
                results.push(None);
            }
        }

        Ok(results.to_vector())
    }
}
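The hierarchy functions in this hunk are thin wrappers over h3o's CellIndex API: resolve a cell, then ask for its children, counts, or positions. A small sketch of the underlying calls, assuming the h3o crate as pulled in by this crate's Cargo.toml:

use h3o::{LatLng, Resolution};

fn main() {
    // h3_latlng_to_cell: coordinate plus resolution yields a cell index.
    let cell = LatLng::new(37.7749, -122.4194)
        .expect("valid coordinate")
        .to_cell(Resolution::Five);
    // h3_cell_to_children_size counts descendants at a finer resolution...
    let count = cell.children_count(Resolution::Seven);
    // ...while h3_cell_to_children materializes them as u64 cell ids.
    let children: Vec<u64> = cell.children(Resolution::Seven).map(u64::from).collect();
    assert_eq!(count as usize, children.len());
}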
/// Function that returns children cell count
|
||||
#[derive(Clone, Debug, Default, Display)]
|
||||
#[display("{}", self.name())]
|
||||
pub struct H3CellToChildrenSize;
|
||||
|
||||
impl Function for H3CellToChildrenSize {
|
||||
fn name(&self) -> &str {
|
||||
"h3_cell_to_children_size"
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::uint64_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
signature_of_cell_and_resolution()
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure_columns_n!(columns, 2);
|
||||
|
||||
let cell_vec = &columns[0];
|
||||
let res_vec = &columns[1];
|
||||
let size = cell_vec.len();
|
||||
let mut results = UInt64VectorBuilder::with_capacity(size);
|
||||
|
||||
for i in 0..size {
|
||||
let cell = cell_from_value(cell_vec.get(i))?;
|
||||
let res = value_to_resolution(res_vec.get(i))?;
|
||||
let result = cell.map(|cell| cell.children_count(res));
|
||||
results.push(result);
|
||||
}
|
||||
|
||||
Ok(results.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
/// Function that returns the cell position if its parent at given resolution
|
||||
#[derive(Clone, Debug, Default, Display)]
|
||||
#[display("{}", self.name())]
|
||||
pub struct H3CellToChildPos;
|
||||
|
||||
impl Function for H3CellToChildPos {
|
||||
fn name(&self) -> &str {
|
||||
"h3_cell_to_child_pos"
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::uint64_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
signature_of_cell_and_resolution()
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure_columns_n!(columns, 2);
|
||||
|
||||
let cell_vec = &columns[0];
|
||||
let res_vec = &columns[1];
|
||||
let size = cell_vec.len();
|
||||
let mut results = UInt64VectorBuilder::with_capacity(size);
|
||||
|
||||
for i in 0..size {
|
||||
let cell = cell_from_value(cell_vec.get(i))?;
|
||||
let res = value_to_resolution(res_vec.get(i))?;
|
||||
let result = cell.and_then(|cell| cell.child_position(res));
|
||||
results.push(result);
|
||||
}
|
||||
|
||||
Ok(results.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
/// Function that returns the cell at given position of the parent at given resolution
|
||||
#[derive(Clone, Debug, Default, Display)]
|
||||
#[display("{}", self.name())]
|
||||
pub struct H3ChildPosToCell;
|
||||
|
||||
impl Function for H3ChildPosToCell {
|
||||
fn name(&self) -> &str {
|
||||
"h3_child_pos_to_cell"
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::uint64_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
let mut signatures =
|
||||
Vec::with_capacity(POSITION_TYPES.len() * CELL_TYPES.len() * RESOLUTION_TYPES.len());
|
||||
for position_type in POSITION_TYPES.as_slice() {
|
||||
for cell_type in CELL_TYPES.as_slice() {
|
||||
for resolution_type in RESOLUTION_TYPES.as_slice() {
|
||||
signatures.push(TypeSignature::Exact(vec![
|
||||
position_type.clone(),
|
||||
cell_type.clone(),
|
||||
resolution_type.clone(),
|
||||
]));
|
||||
}
|
||||
}
|
||||
}
|
||||
Signature::one_of(signatures, Volatility::Stable)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure_columns_n!(columns, 3);
|
||||
|
||||
let pos_vec = &columns[0];
|
||||
let cell_vec = &columns[1];
|
||||
let res_vec = &columns[2];
|
||||
let size = cell_vec.len();
|
||||
let mut results = UInt64VectorBuilder::with_capacity(size);
|
||||
|
||||
for i in 0..size {
|
||||
let cell = cell_from_value(cell_vec.get(i))?;
|
||||
let pos = value_to_position(pos_vec.get(i))?;
|
||||
let res = value_to_resolution(res_vec.get(i))?;
|
||||
let result = cell.and_then(|cell| cell.child_at(pos, res).map(u64::from));
|
||||
results.push(result);
|
||||
}
|
||||
|
||||
Ok(results.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
/// Function that returns cells with k distances of given cell
|
||||
#[derive(Clone, Debug, Default, Display)]
|
||||
#[display("{}", self.name())]
|
||||
pub struct H3GridDisk;
|
||||
|
||||
impl Function for H3GridDisk {
|
||||
fn name(&self) -> &str {
|
||||
"h3_grid_disk"
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::list_datatype(
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
))
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
signature_of_cell_and_distance()
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure_columns_n!(columns, 2);
|
||||
|
||||
let cell_vec = &columns[0];
|
||||
let k_vec = &columns[1];
|
||||
let size = cell_vec.len();
|
||||
let mut results =
|
||||
ListVectorBuilder::with_type_capacity(ConcreteDataType::uint64_datatype(), size);
|
||||
|
||||
for i in 0..size {
|
||||
let cell = cell_from_value(cell_vec.get(i))?;
|
||||
let k = value_to_distance(k_vec.get(i))?;
|
||||
|
||||
let result = cell.map(|cell| {
|
||||
let children: Vec<Value> = cell
|
||||
.grid_disk::<Vec<_>>(k)
|
||||
.into_iter()
|
||||
.map(|child| Value::from(u64::from(child)))
|
||||
.collect();
|
||||
ListValue::new(children, ConcreteDataType::uint64_datatype())
|
||||
});
|
||||
|
||||
if let Some(list_value) = result {
|
||||
results.push(Some(list_value.as_scalar_ref()));
|
||||
} else {
|
||||
results.push(None);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(results.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
/// Function that returns all cells within k distances of given cell
|
||||
#[derive(Clone, Debug, Default, Display)]
|
||||
#[display("{}", self.name())]
|
||||
pub struct H3GridDiskDistances;
|
||||
|
||||
impl Function for H3GridDiskDistances {
|
||||
fn name(&self) -> &str {
|
||||
"h3_grid_disk_distances"
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::list_datatype(
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
))
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
signature_of_cell_and_distance()
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure_columns_n!(columns, 2);
|
||||
|
||||
let cell_vec = &columns[0];
|
||||
let k_vec = &columns[1];
|
||||
let size = cell_vec.len();
|
||||
let mut results =
|
||||
ListVectorBuilder::with_type_capacity(ConcreteDataType::uint64_datatype(), size);
|
||||
|
||||
for i in 0..size {
|
||||
let cell = cell_from_value(cell_vec.get(i))?;
|
||||
let k = value_to_distance(k_vec.get(i))?;
|
||||
|
||||
let result = cell.map(|cell| {
|
||||
let children: Vec<Value> = cell
|
||||
.grid_disk_distances::<Vec<_>>(k)
|
||||
.into_iter()
|
||||
.map(|(child, _distance)| Value::from(u64::from(child)))
|
||||
.collect();
|
||||
ListValue::new(children, ConcreteDataType::uint64_datatype())
|
||||
});
|
||||
|
||||
if let Some(list_value) = result {
|
||||
results.push(Some(list_value.as_scalar_ref()));
|
||||
} else {
|
||||
results.push(None);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(results.to_vector())
|
||||
}
|
||||
}

/// Function that returns distance between two cells
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct H3GridDistance;

impl Function for H3GridDistance {
    fn name(&self) -> &str {
        "h3_grid_distance"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::int32_datatype())
    }

    fn signature(&self) -> Signature {
        signature_of_double_cells()
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 2);

        let cell_this_vec = &columns[0];
        let cell_that_vec = &columns[1];
        let size = cell_this_vec.len();

        let mut results = Int32VectorBuilder::with_capacity(size);

        for i in 0..size {
            let result = match (
                cell_from_value(cell_this_vec.get(i))?,
                cell_from_value(cell_that_vec.get(i))?,
            ) {
                (Some(cell_this), Some(cell_that)) => {
                    let dist = cell_this
                        .grid_distance(cell_that)
                        .map_err(|e| {
                            BoxedError::new(PlainError::new(
                                format!("H3 error: {}", e),
                                StatusCode::EngineExecuteQuery,
                            ))
                        })
                        .context(error::ExecuteSnafu)?;
                    Some(dist)
                }
                _ => None,
            };

            results.push(result);
        }

        Ok(results.to_vector())
    }
}

/// Function that returns path cells between two cells
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct H3GridPathCells;

impl Function for H3GridPathCells {
    fn name(&self) -> &str {
        "h3_grid_path_cells"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::list_datatype(
            ConcreteDataType::uint64_datatype(),
        ))
    }

    fn signature(&self) -> Signature {
        signature_of_double_cells()
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 2);

        let cell_this_vec = &columns[0];
        let cell_that_vec = &columns[1];
        let size = cell_this_vec.len();
        let mut results =
            ListVectorBuilder::with_type_capacity(ConcreteDataType::uint64_datatype(), size);

        for i in 0..size {
            let result = match (
                cell_from_value(cell_this_vec.get(i))?,
                cell_from_value(cell_that_vec.get(i))?,
            ) {
                (Some(cell_this), Some(cell_that)) => {
                    let cells = cell_this
                        .grid_path_cells(cell_that)
                        .and_then(|x| x.collect::<std::result::Result<Vec<CellIndex>, _>>())
                        .map_err(|e| {
                            BoxedError::new(PlainError::new(
                                format!("H3 error: {}", e),
                                StatusCode::EngineExecuteQuery,
                            ))
                        })
                        .context(error::ExecuteSnafu)?;
                    Some(ListValue::new(
                        cells
                            .into_iter()
                            .map(|c| Value::from(u64::from(c)))
                            .collect(),
                        ConcreteDataType::uint64_datatype(),
                    ))
                }
                _ => None,
            };

            if let Some(list_value) = result {
                results.push(Some(list_value.as_scalar_ref()));
            } else {
                results.push(None);
            }
        }

        Ok(results.to_vector())
    }
}

fn value_to_resolution(v: Value) -> Result<Resolution> {
    let r = match v {
        Value::Int8(v) => v as u8,
@@ -716,26 +974,47 @@ fn value_to_resolution(v: Value) -> Result<Resolution> {
        .context(error::ExecuteSnafu)
}

fn value_to_position(v: Value) -> Result<u64> {
    match v {
        Value::Int8(v) => ensure_and_coerce!(v >= 0, v as u64),
        Value::Int16(v) => ensure_and_coerce!(v >= 0, v as u64),
        Value::Int32(v) => ensure_and_coerce!(v >= 0, v as u64),
        Value::Int64(v) => ensure_and_coerce!(v >= 0, v as u64),
        Value::UInt8(v) => Ok(v as u64),
        Value::UInt16(v) => Ok(v as u64),
        Value::UInt32(v) => Ok(v as u64),
        Value::UInt64(v) => Ok(v),
        _ => unreachable!(),
    }
}

fn value_to_distance(v: Value) -> Result<u32> {
    match v {
        Value::Int8(v) => ensure_and_coerce!(v >= 0, v as u32),
        Value::Int16(v) => ensure_and_coerce!(v >= 0, v as u32),
        Value::Int32(v) => ensure_and_coerce!(v >= 0, v as u32),
        Value::Int64(v) => ensure_and_coerce!(v >= 0, v as u32),
        Value::UInt8(v) => Ok(v as u32),
        Value::UInt16(v) => Ok(v as u32),
        Value::UInt32(v) => Ok(v),
        Value::UInt64(v) => Ok(v as u32),
        _ => unreachable!(),
    }
}

fn signature_of_cell() -> Signature {
-    let mut signatures = Vec::new();
-    for cell_type in &[
-        ConcreteDataType::uint64_datatype(),
-        ConcreteDataType::int64_datatype(),
-    ] {
+    let mut signatures = Vec::with_capacity(CELL_TYPES.len());
+    for cell_type in CELL_TYPES.as_slice() {
        signatures.push(TypeSignature::Exact(vec![cell_type.clone()]));
    }

    Signature::one_of(signatures, Volatility::Stable)
}

-fn signature_of_double_cell() -> Signature {
-    let mut signatures = Vec::new();
-    let cell_types = &[
-        ConcreteDataType::uint64_datatype(),
-        ConcreteDataType::int64_datatype(),
-    ];
-    for cell_type in cell_types {
-        for cell_type2 in cell_types {
+fn signature_of_double_cells() -> Signature {
+    let mut signatures = Vec::with_capacity(CELL_TYPES.len() * CELL_TYPES.len());
+    for cell_type in CELL_TYPES.as_slice() {
+        for cell_type2 in CELL_TYPES.as_slice() {
            signatures.push(TypeSignature::Exact(vec![
                cell_type.clone(),
                cell_type2.clone(),
@@ -747,21 +1026,9 @@ fn signature_of_double_cell() -> Signature {
}

fn signature_of_cell_and_resolution() -> Signature {
-    let mut signatures = Vec::new();
-    for cell_type in &[
-        ConcreteDataType::uint64_datatype(),
-        ConcreteDataType::int64_datatype(),
-    ] {
-        for resolution_type in &[
-            ConcreteDataType::int8_datatype(),
-            ConcreteDataType::int16_datatype(),
-            ConcreteDataType::int32_datatype(),
-            ConcreteDataType::int64_datatype(),
-            ConcreteDataType::uint8_datatype(),
-            ConcreteDataType::uint16_datatype(),
-            ConcreteDataType::uint32_datatype(),
-            ConcreteDataType::uint64_datatype(),
-        ] {
+    let mut signatures = Vec::with_capacity(CELL_TYPES.len() * RESOLUTION_TYPES.len());
+    for cell_type in CELL_TYPES.as_slice() {
+        for resolution_type in RESOLUTION_TYPES.as_slice() {
            signatures.push(TypeSignature::Exact(vec![
                cell_type.clone(),
                resolution_type.clone(),
@@ -771,6 +1038,19 @@ fn signature_of_cell_and_resolution() -> Signature {
    Signature::one_of(signatures, Volatility::Stable)
}

fn signature_of_cell_and_distance() -> Signature {
    let mut signatures = Vec::with_capacity(CELL_TYPES.len() * DISTANCE_TYPES.len());
    for cell_type in CELL_TYPES.as_slice() {
        for distance_type in DISTANCE_TYPES.as_slice() {
            signatures.push(TypeSignature::Exact(vec![
                cell_type.clone(),
                distance_type.clone(),
            ]));
        }
    }
    Signature::one_of(signatures, Volatility::Stable)
}

fn cell_from_value(v: Value) -> Result<Option<CellIndex>> {
    let cell = match v {
        Value::Int64(v) => Some(
75  src/common/function/src/scalars/geo/helpers.rs  Normal file
@@ -0,0 +1,75 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

macro_rules! ensure_columns_len {
    ($columns:ident) => {
        snafu::ensure!(
            $columns.windows(2).all(|c| c[0].len() == c[1].len()),
            common_query::error::InvalidFuncArgsSnafu {
                err_msg: "The length of input columns are in different size"
            }
        )
    };
    ($column_a:ident, $column_b:ident, $($column_n:ident),*) => {
        snafu::ensure!(
            {
                let mut result = $column_a.len() == $column_b.len();
                $(
                    result = result && ($column_a.len() == $column_n.len());
                )*
                result
            },
            common_query::error::InvalidFuncArgsSnafu {
                err_msg: "The length of input columns are in different size"
            }
        )
    };
}

pub(super) use ensure_columns_len;

macro_rules! ensure_columns_n {
    ($columns:ident, $n:literal) => {
        snafu::ensure!(
            $columns.len() == $n,
            common_query::error::InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of arguments is not correct, expect {}, provided : {}",
                    stringify!($n),
                    $columns.len()
                ),
            }
        );

        if $n > 1 {
            ensure_columns_len!($columns);
        }
    };
}

pub(super) use ensure_columns_n;

macro_rules! ensure_and_coerce {
    ($compare:expr, $coerce:expr) => {{
        snafu::ensure!(
            $compare,
            common_query::error::InvalidFuncArgsSnafu {
                err_msg: "Argument was outside of acceptable range "
            }
        );
        Ok($coerce)
    }};
}

pub(super) use ensure_and_coerce;
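
A quick sketch of the intended call pattern for these guards, assuming they are invoked from a sibling geo module where the macros are in scope (the helper name here is hypothetical):

use common_query::error::Result;
use datatypes::vectors::VectorRef;

// Hypothetical helper: fail fast with InvalidFuncArgs unless exactly two
// argument columns of equal length were supplied to the function.
fn check_two_columns(columns: &[VectorRef]) -> Result<()> {
    ensure_columns_n!(columns, 2);
    Ok(())
}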

275  src/common/function/src/scalars/geo/s2.rs  Normal file
@@ -0,0 +1,275 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::value::Value;
use datatypes::vectors::{MutableVector, StringVectorBuilder, UInt64VectorBuilder, VectorRef};
use derive_more::Display;
use once_cell::sync::Lazy;
use s2::cellid::{CellID, MAX_LEVEL};
use s2::latlng::LatLng;
use snafu::ensure;

use crate::function::{Function, FunctionContext};
use crate::scalars::geo::helpers::{ensure_and_coerce, ensure_columns_len, ensure_columns_n};

static CELL_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
    vec![
        ConcreteDataType::int64_datatype(),
        ConcreteDataType::uint64_datatype(),
    ]
});

static COORDINATE_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
    vec![
        ConcreteDataType::float32_datatype(),
        ConcreteDataType::float64_datatype(),
    ]
});

static LEVEL_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
    vec![
        ConcreteDataType::int8_datatype(),
        ConcreteDataType::int16_datatype(),
        ConcreteDataType::int32_datatype(),
        ConcreteDataType::int64_datatype(),
        ConcreteDataType::uint8_datatype(),
        ConcreteDataType::uint16_datatype(),
        ConcreteDataType::uint32_datatype(),
        ConcreteDataType::uint64_datatype(),
    ]
});

/// Function that returns [s2] encoding cellid for a given geospatial coordinate.
///
/// [s2]: http://s2geometry.io
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct S2LatLngToCell;

impl Function for S2LatLngToCell {
    fn name(&self) -> &str {
        "s2_latlng_to_cell"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::uint64_datatype())
    }

    fn signature(&self) -> Signature {
        let mut signatures = Vec::with_capacity(COORDINATE_TYPES.len());
        for coord_type in COORDINATE_TYPES.as_slice() {
            signatures.push(TypeSignature::Exact(vec![
                // latitude
                coord_type.clone(),
                // longitude
                coord_type.clone(),
            ]));
        }
        Signature::one_of(signatures, Volatility::Stable)
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 2);

        let lat_vec = &columns[0];
        let lon_vec = &columns[1];

        let size = lat_vec.len();
        let mut results = UInt64VectorBuilder::with_capacity(size);

        for i in 0..size {
            let lat = lat_vec.get(i).as_f64_lossy();
            let lon = lon_vec.get(i).as_f64_lossy();

            let result = match (lat, lon) {
                (Some(lat), Some(lon)) => {
                    let coord = LatLng::from_degrees(lat, lon);
                    ensure!(
                        coord.is_valid(),
                        InvalidFuncArgsSnafu {
                            err_msg: "The input coordinates are invalid",
                        }
                    );
                    let cellid = CellID::from(coord);
                    let encoded: u64 = cellid.0;
                    Some(encoded)
                }
                _ => None,
            };

            results.push(result);
        }

        Ok(results.to_vector())
    }
}
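
A standalone sketch of the same encoding step, assuming the `s2` crate as imported above (the coordinate is an arbitrary example):

use s2::cellid::CellID;
use s2::latlng::LatLng;

fn main() {
    // Degrees in, 64-bit cell id out; invalid coordinates are rejected upstream.
    let coord = LatLng::from_degrees(40.7128, -74.0060);
    assert!(coord.is_valid());
    let cell: u64 = CellID::from(coord).0;
    println!("cell id: {cell}");
}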

/// Return the level of current s2 cell
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct S2CellLevel;

impl Function for S2CellLevel {
    fn name(&self) -> &str {
        "s2_cell_level"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::uint64_datatype())
    }

    fn signature(&self) -> Signature {
        signature_of_cell()
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 1);

        let cell_vec = &columns[0];
        let size = cell_vec.len();
        let mut results = UInt64VectorBuilder::with_capacity(size);

        for i in 0..size {
            let cell = cell_from_value(cell_vec.get(i));
            let res = cell.map(|cell| cell.level());

            results.push(res);
        }

        Ok(results.to_vector())
    }
}

/// Return the string presentation of the cell
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct S2CellToToken;

impl Function for S2CellToToken {
    fn name(&self) -> &str {
        "s2_cell_to_token"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::string_datatype())
    }

    fn signature(&self) -> Signature {
        signature_of_cell()
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 1);

        let cell_vec = &columns[0];
        let size = cell_vec.len();
        let mut results = StringVectorBuilder::with_capacity(size);

        for i in 0..size {
            let cell = cell_from_value(cell_vec.get(i));
            let res = cell.map(|cell| cell.to_token());

            results.push(res.as_deref());
        }

        Ok(results.to_vector())
    }
}

/// Return parent at given level of current s2 cell
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct S2CellParent;

impl Function for S2CellParent {
    fn name(&self) -> &str {
        "s2_cell_parent"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::uint64_datatype())
    }

    fn signature(&self) -> Signature {
        signature_of_cell_and_level()
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 2);

        let cell_vec = &columns[0];
        let level_vec = &columns[1];
        let size = cell_vec.len();
        let mut results = UInt64VectorBuilder::with_capacity(size);

        for i in 0..size {
            let cell = cell_from_value(cell_vec.get(i));
            let level = value_to_level(level_vec.get(i))?;
            let result = cell.map(|cell| cell.parent(level).0);

            results.push(result);
        }

        Ok(results.to_vector())
    }
}

fn signature_of_cell() -> Signature {
    let mut signatures = Vec::with_capacity(CELL_TYPES.len());
    for cell_type in CELL_TYPES.as_slice() {
        signatures.push(TypeSignature::Exact(vec![cell_type.clone()]));
    }

    Signature::one_of(signatures, Volatility::Stable)
}

fn signature_of_cell_and_level() -> Signature {
    let mut signatures = Vec::with_capacity(CELL_TYPES.len() * LEVEL_TYPES.len());
    for cell_type in CELL_TYPES.as_slice() {
        for level_type in LEVEL_TYPES.as_slice() {
            signatures.push(TypeSignature::Exact(vec![
                cell_type.clone(),
                level_type.clone(),
            ]));
        }
    }
    Signature::one_of(signatures, Volatility::Stable)
}

fn cell_from_value(v: Value) -> Option<CellID> {
    match v {
        Value::Int64(v) => Some(CellID(v as u64)),
        Value::UInt64(v) => Some(CellID(v)),
        _ => None,
    }
}

fn value_to_level(v: Value) -> Result<u64> {
    match v {
        Value::Int8(v) => ensure_and_coerce!(v >= 0 && v <= MAX_LEVEL as i8, v as u64),
        Value::Int16(v) => ensure_and_coerce!(v >= 0 && v <= MAX_LEVEL as i16, v as u64),
        Value::Int32(v) => ensure_and_coerce!(v >= 0 && v <= MAX_LEVEL as i32, v as u64),
        Value::Int64(v) => ensure_and_coerce!(v >= 0 && v <= MAX_LEVEL as i64, v as u64),
        Value::UInt8(v) => ensure_and_coerce!(v <= MAX_LEVEL as u8, v as u64),
        Value::UInt16(v) => ensure_and_coerce!(v <= MAX_LEVEL as u16, v as u64),
        Value::UInt32(v) => ensure_and_coerce!(v <= MAX_LEVEL as u32, v as u64),
        Value::UInt64(v) => ensure_and_coerce!(v <= MAX_LEVEL, v),
        _ => unreachable!(),
    }
}
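
For intuition about the parent/level relationship these functions expose, a small sketch under the same `s2` crate assumption (the coordinate is an arbitrary example):

use s2::cellid::CellID;
use s2::latlng::LatLng;

fn main() {
    // CellID::from(LatLng) yields a leaf cell; s2_cell_parent walks up
    // the hierarchy to the requested level.
    let leaf = CellID::from(LatLng::from_degrees(52.52, 13.405));
    let parent = leaf.parent(10);
    assert_eq!(parent.level(), 10);
    // s2_cell_to_token renders the compact hex form of the cell id.
    println!("token: {}", parent.to_token());
}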

@@ -15,6 +15,8 @@
use std::sync::Arc;

mod json_get;
mod json_is;
+mod json_path_exists;
+mod json_path_match;
mod json_to_string;
mod parse_json;

@@ -46,5 +48,8 @@ impl JsonFunction {
        registry.register(Arc::new(JsonIsBool));
        registry.register(Arc::new(JsonIsArray));
        registry.register(Arc::new(JsonIsObject));
+
+        registry.register(Arc::new(json_path_exists::JsonPathExistsFunction));
+        registry.register(Arc::new(json_path_match::JsonPathMatchFunction));
    }
}

172  src/common/function/src/scalars/json/json_path_exists.rs  Normal file
@@ -0,0 +1,172 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::{self, Display};

use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::Signature;
use datafusion::logical_expr::Volatility;
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::VectorRef;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{BooleanVectorBuilder, MutableVector};
use snafu::ensure;

use crate::function::{Function, FunctionContext};

/// Check if the given JSON data contains the given JSON path.
#[derive(Clone, Debug, Default)]
pub struct JsonPathExistsFunction;

const NAME: &str = "json_path_exists";

impl Function for JsonPathExistsFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::boolean_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::exact(
            vec![
                ConcreteDataType::json_datatype(),
                ConcreteDataType::string_datatype(),
            ],
            Volatility::Immutable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 2,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly two, have: {}",
                    columns.len()
                ),
            }
        );
        let jsons = &columns[0];
        let paths = &columns[1];

        let size = jsons.len();
        let datatype = jsons.data_type();
        let mut results = BooleanVectorBuilder::with_capacity(size);

        match datatype {
            // JSON data type uses binary vector
            ConcreteDataType::Binary(_) => {
                for i in 0..size {
                    let json = jsons.get_ref(i);
                    let path = paths.get_ref(i);

                    let json = json.as_binary();
                    let path = path.as_string();
                    let result = match (json, path) {
                        (Ok(Some(json)), Ok(Some(path))) => {
                            let json_path = jsonb::jsonpath::parse_json_path(path.as_bytes());
                            match json_path {
                                Ok(json_path) => jsonb::path_exists(json, json_path).ok(),
                                Err(_) => None,
                            }
                        }
                        _ => None,
                    };

                    results.push(result);
                }
            }
            _ => {
                return UnsupportedInputDataTypeSnafu {
                    function: NAME,
                    datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
                }
                .fail();
            }
        }

        Ok(results.to_vector())
    }
}

impl Display for JsonPathExistsFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "JSON_PATH_EXISTS")
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use common_query::prelude::TypeSignature;
    use datatypes::scalars::ScalarVector;
    use datatypes::vectors::{BinaryVector, StringVector};

    use super::*;

    #[test]
    fn test_json_path_exists_function() {
        let json_path_exists = JsonPathExistsFunction;

        assert_eq!("json_path_exists", json_path_exists.name());
        assert_eq!(
            ConcreteDataType::boolean_datatype(),
            json_path_exists
                .return_type(&[ConcreteDataType::json_datatype()])
                .unwrap()
        );

        assert!(matches!(json_path_exists.signature(),
            Signature {
                type_signature: TypeSignature::Exact(valid_types),
                volatility: Volatility::Immutable
            } if valid_types == vec![ConcreteDataType::json_datatype(), ConcreteDataType::string_datatype()]
        ));

        let json_strings = [
            r#"{"a": {"b": 2}, "b": 2, "c": 3}"#,
            r#"{"a": 4, "b": {"c": 6}, "c": 6}"#,
            r#"{"a": 7, "b": 8, "c": {"a": 7}}"#,
            r#"{"a": 7, "b": 8, "c": {"a": 7}}"#,
        ];
        let paths = vec!["$.a.b.c", "$.b", "$.c.a", ".d"];
        let results = [false, true, true, false];

        let jsonbs = json_strings
            .iter()
            .map(|s| {
                let value = jsonb::parse_value(s.as_bytes()).unwrap();
                value.to_vec()
            })
            .collect::<Vec<_>>();

        let json_vector = BinaryVector::from_vec(jsonbs);
        let path_vector = StringVector::from_vec(paths);
        let args: Vec<VectorRef> = vec![Arc::new(json_vector), Arc::new(path_vector)];
        let vector = json_path_exists
            .eval(FunctionContext::default(), &args)
            .unwrap();

        assert_eq!(4, vector.len());
        for (i, gt) in results.iter().enumerate() {
            let result = vector.get_ref(i);
            let result = result.as_boolean().unwrap().unwrap();
            assert_eq!(*gt, result);
        }
    }
}
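
The function is a thin wrapper over `jsonb`; a standalone sketch of the underlying check, using the same `jsonb` calls as above:

fn main() {
    // Encode a JSON document into jsonb's binary form.
    let doc = jsonb::parse_value(br#"{"a": {"b": 2}}"#).unwrap().to_vec();
    // Parse the path, then test it against the binary document.
    let path = jsonb::jsonpath::parse_json_path(b"$.a.b").unwrap();
    assert!(jsonb::path_exists(&doc, path).unwrap());
}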

202  src/common/function/src/scalars/json/json_path_match.rs  Normal file
@@ -0,0 +1,202 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::{self, Display};

use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::Signature;
use datafusion::logical_expr::Volatility;
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::VectorRef;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{BooleanVectorBuilder, MutableVector};
use snafu::ensure;

use crate::function::{Function, FunctionContext};

/// Check if the given JSON data matches the given JSON path's predicate.
#[derive(Clone, Debug, Default)]
pub struct JsonPathMatchFunction;

const NAME: &str = "json_path_match";

impl Function for JsonPathMatchFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::boolean_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::exact(
            vec![
                ConcreteDataType::json_datatype(),
                ConcreteDataType::string_datatype(),
            ],
            Volatility::Immutable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 2,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly two, have: {}",
                    columns.len()
                ),
            }
        );
        let jsons = &columns[0];
        let paths = &columns[1];

        let size = jsons.len();
        let mut results = BooleanVectorBuilder::with_capacity(size);

        for i in 0..size {
            let json = jsons.get_ref(i);
            let path = paths.get_ref(i);

            match json.data_type() {
                // JSON data type uses binary vector
                ConcreteDataType::Binary(_) => {
                    let json = json.as_binary();
                    let path = path.as_string();
                    let result = match (json, path) {
                        (Ok(Some(json)), Ok(Some(path))) => {
                            if !jsonb::is_null(json) {
                                let json_path = jsonb::jsonpath::parse_json_path(path.as_bytes());
                                match json_path {
                                    Ok(json_path) => jsonb::path_match(json, json_path).ok(),
                                    Err(_) => None,
                                }
                            } else {
                                None
                            }
                        }
                        _ => None,
                    };

                    results.push(result);
                }

                _ => {
                    return UnsupportedInputDataTypeSnafu {
                        function: NAME,
                        datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
                    }
                    .fail();
                }
            }
        }

        Ok(results.to_vector())
    }
}

impl Display for JsonPathMatchFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "JSON_PATH_MATCH")
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use common_query::prelude::TypeSignature;
    use datatypes::vectors::{BinaryVector, StringVector};

    use super::*;

    #[test]
    fn test_json_path_match_function() {
        let json_path_match = JsonPathMatchFunction;

        assert_eq!("json_path_match", json_path_match.name());
        assert_eq!(
            ConcreteDataType::boolean_datatype(),
            json_path_match
                .return_type(&[ConcreteDataType::json_datatype()])
                .unwrap()
        );

        assert!(matches!(json_path_match.signature(),
            Signature {
                type_signature: TypeSignature::Exact(valid_types),
                volatility: Volatility::Immutable
            } if valid_types == vec![ConcreteDataType::json_datatype(), ConcreteDataType::string_datatype()],
        ));

        let json_strings = [
            Some(r#"{"a": {"b": 2}, "b": 2, "c": 3}"#.to_string()),
            Some(r#"{"a": 1, "b": [1,2,3]}"#.to_string()),
            Some(r#"{"a": 1 ,"b": [1,2,3]}"#.to_string()),
            Some(r#"[1,2,3]"#.to_string()),
            Some(r#"{"a":1,"b":[1,2,3]}"#.to_string()),
            Some(r#"null"#.to_string()),
            Some(r#"null"#.to_string()),
        ];

        let paths = vec![
            Some("$.a.b == 2".to_string()),
            Some("$.b[1 to last] >= 2".to_string()),
            Some("$.c > 0".to_string()),
            Some("$[0 to last] > 0".to_string()),
            Some(r#"null"#.to_string()),
            Some("$.c > 0".to_string()),
            Some(r#"null"#.to_string()),
        ];

        let results = [
            Some(true),
            Some(true),
            Some(false),
            Some(true),
            None,
            None,
            None,
        ];

        let jsonbs = json_strings
            .into_iter()
            .map(|s| s.map(|json| jsonb::parse_value(json.as_bytes()).unwrap().to_vec()))
            .collect::<Vec<_>>();

        let json_vector = BinaryVector::from(jsonbs);
        let path_vector = StringVector::from(paths);
        let args: Vec<VectorRef> = vec![Arc::new(json_vector), Arc::new(path_vector)];
        let vector = json_path_match
            .eval(FunctionContext::default(), &args)
            .unwrap();

        assert_eq!(7, vector.len());
        for (i, expected) in results.iter().enumerate() {
            let result = vector.get_ref(i);

            match expected {
                Some(expected_value) => {
                    assert!(!result.is_null());
                    let result_value = result.as_boolean().unwrap().unwrap();
                    assert_eq!(*expected_value, result_value);
                }
                None => {
                    assert!(result.is_null());
                }
            }
        }
    }
}
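
`json_path_match` differs from `json_path_exists` in that the path carries a predicate; a sketch of the underlying call under the same `jsonb` assumption:

fn main() {
    let doc = jsonb::parse_value(br#"{"a": 1, "b": [1, 2, 3]}"#).unwrap().to_vec();
    // The trailing comparison makes this a predicate, not a plain lookup.
    let path = jsonb::jsonpath::parse_json_path(b"$.b[1 to last] >= 2").unwrap();
    assert!(jsonb::path_match(&doc, path).unwrap());
}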

@@ -199,6 +199,7 @@ pub fn default_get_uuid(working_home: &Option<String>) -> Option<String> {
}

/// Report version info to GreptimeDB.
+///
/// We do not collect any identity-sensitive information.
/// This task is scheduled to run every 30 minutes.
/// The task will be disabled by default. It can be enabled by setting the build feature `greptimedb-telemetry`
@@ -324,7 +325,7 @@ mod tests {
        });
        let addr = ([127, 0, 0, 1], port).into();

-        let server = Server::bind(&addr).serve(make_svc);
+        let server = Server::try_bind(&addr).unwrap().serve(make_svc);
        let graceful = server.with_graceful_shutdown(async {
            rx.await.ok();
        });

@@ -18,6 +18,7 @@ common-time.workspace = true
datatypes.workspace = true
prost.workspace = true
snafu.workspace = true
+store-api.workspace = true
table.workspace = true

[dev-dependencies]

@@ -22,12 +22,13 @@ use api::v1::{
use common_query::AddColumnLocation;
use datatypes::schema::{ColumnSchema, RawSchema};
use snafu::{ensure, OptionExt, ResultExt};
+use store_api::region_request::ChangeOption;
use table::metadata::TableId;
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, ChangeColumnTypeRequest};

use crate::error::{
-    InvalidColumnDefSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu, Result,
-    UnknownLocationTypeSnafu,
+    InvalidChangeTableOptionRequestSnafu, InvalidColumnDefSnafu, MissingFieldSnafu,
+    MissingTimestampColumnSnafu, Result, UnknownLocationTypeSnafu,
};

const LOCATION_TYPE_FIRST: i32 = LocationType::First as i32;
@@ -92,6 +93,15 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result<Alter
        Kind::RenameTable(RenameTable { new_table_name }) => {
            AlterKind::RenameTable { new_table_name }
        }
+        Kind::ChangeTableOptions(api::v1::ChangeTableOptions {
+            change_table_options,
+        }) => AlterKind::ChangeTableOptions {
+            options: change_table_options
+                .iter()
+                .map(ChangeOption::try_from)
+                .collect::<std::result::Result<Vec<_>, _>>()
+                .context(InvalidChangeTableOptionRequestSnafu)?,
+        },
    };

    let request = AlterTableRequest {

@@ -19,6 +19,7 @@ use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
+use store_api::metadata::MetadataError;

#[derive(Snafu)]
#[snafu(visibility(pub))]
@@ -118,6 +119,12 @@ pub enum Error {
        #[snafu(implicit)]
        location: Location,
    },
+
+    #[snafu(display("Invalid change table option request"))]
+    InvalidChangeTableOptionRequest {
+        #[snafu(source)]
+        error: MetadataError,
+    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -141,6 +148,7 @@ impl ErrorExt for Error {
            Error::UnknownColumnDataType { .. } | Error::InvalidFulltextColumnType { .. } => {
                StatusCode::InvalidArguments
            }
+            Error::InvalidChangeTableOptionRequest { .. } => StatusCode::InvalidArguments,
        }
    }

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use api::helper::{convert_i128_to_interval, convert_to_pb_decimal128};
+use api::helper::{convert_month_day_nano_to_pb, convert_to_pb_decimal128};
use api::v1::column::Values;
use common_base::BitVec;
use datatypes::types::{IntervalType, TimeType, TimestampType, WrapperType};
@@ -211,7 +211,7 @@ pub fn values(arrays: &[VectorRef]) -> Result<Values> {
        ConcreteDataType::Interval(IntervalType::MonthDayNano(_)),
        IntervalMonthDayNanoVector,
        interval_month_day_nano_values,
-        |x| { convert_i128_to_interval(x.into_native()) }
+        |x| { convert_month_day_nano_to_pb(x) }
    ),
    (
        ConcreteDataType::Decimal128(_),

@@ -21,23 +21,19 @@ use syn::{parse_macro_input, DeriveInput, ItemStruct};
pub(crate) fn impl_aggr_func_type_store(ast: &DeriveInput) -> TokenStream {
    let name = &ast.ident;
    let gen = quote! {
-        use common_query::logical_plan::accumulator::AggrFuncTypeStore;
-        use common_query::error::{InvalidInputStateSnafu, Error as QueryError};
-        use datatypes::prelude::ConcreteDataType;
-
-        impl AggrFuncTypeStore for #name {
-            fn input_types(&self) -> std::result::Result<Vec<ConcreteDataType>, QueryError> {
+        impl common_query::logical_plan::accumulator::AggrFuncTypeStore for #name {
+            fn input_types(&self) -> std::result::Result<Vec<datatypes::prelude::ConcreteDataType>, common_query::error::Error> {
                let input_types = self.input_types.load();
-                snafu::ensure!(input_types.is_some(), InvalidInputStateSnafu);
+                snafu::ensure!(input_types.is_some(), common_query::error::InvalidInputStateSnafu);
                Ok(input_types.as_ref().unwrap().as_ref().clone())
            }

-            fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> std::result::Result<(), QueryError> {
+            fn set_input_types(&self, input_types: Vec<datatypes::prelude::ConcreteDataType>) -> std::result::Result<(), common_query::error::Error> {
                let old = self.input_types.swap(Some(std::sync::Arc::new(input_types.clone())));
                if let Some(old) = old {
-                    snafu::ensure!(old.len() == input_types.len(), InvalidInputStateSnafu);
+                    snafu::ensure!(old.len() == input_types.len(), common_query::error::InvalidInputStateSnafu);
                    for (x, y) in old.iter().zip(input_types.iter()) {
-                        snafu::ensure!(x == y, InvalidInputStateSnafu);
+                        snafu::ensure!(x == y, common_query::error::InvalidInputStateSnafu);
                    }
                }
                Ok(())
@@ -51,7 +47,7 @@ pub(crate) fn impl_as_aggr_func_creator(_args: TokenStream, input: TokenStream)
    let mut item_struct = parse_macro_input!(input as ItemStruct);
    if let syn::Fields::Named(ref mut fields) = item_struct.fields {
        let result = syn::Field::parse_named.parse2(quote! {
-            input_types: arc_swap::ArcSwapOption<Vec<ConcreteDataType>>
+            input_types: arc_swap::ArcSwapOption<Vec<datatypes::prelude::ConcreteDataType>>
        });
        match result {
            Ok(field) => fields.named.push(field),

@@ -35,7 +35,9 @@ pub fn aggr_func_type_store_derive(input: TokenStream) -> TokenStream {
}

/// A struct can be used as a creator for aggregate function if it has been annotated with this
-/// attribute first. This attribute add a necessary field which is intended to store the input
+/// attribute first.
+///
+/// This attribute adds a necessary field which is intended to store the input
/// data's types to the struct.
/// This attribute is expected to be used along with derive macro [AggrFuncTypeStore].
#[proc_macro_attribute]
@@ -44,9 +46,10 @@ pub fn as_aggr_func_creator(args: TokenStream, input: TokenStream) -> TokenStream {
}

-/// Attribute macro to convert an arithmetic function to a range function. The annotated function
-/// should accept several arrays as input and return a single value as output. This procedure
-/// macro works on any number of input parameters. Return type can be either primitive type
-/// or wrapped in `Option`.
+/// Attribute macro to convert an arithmetic function to a range function. The annotated function
+/// should accept several arrays as input and return a single value as output.
+///
+/// This procedure macro works on any number of input parameters. The return type can be either a
+/// primitive type or wrapped in `Option`.
///
/// # Example
/// Take `count_over_time()` in PromQL as an example:

@@ -24,5 +24,5 @@ struct Foo {}
fn test_derive() {
    let _ = Foo::default();
    assert_fields!(Foo: input_types);
-    assert_impl_all!(Foo: std::fmt::Debug, Default, AggrFuncTypeStore);
+    assert_impl_all!(Foo: std::fmt::Debug, Default, common_query::logical_plan::accumulator::AggrFuncTypeStore);
}

@@ -60,7 +60,7 @@ table.workspace = true
tokio.workspace = true
tokio-postgres = { workspace = true, optional = true }
tonic.workspace = true
-typetag = "0.2"
+typetag.workspace = true

[dev-dependencies]
chrono.workspace = true

@@ -55,6 +55,7 @@ pub trait ClusterInfo {
}

/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-{cluster_id}-{role}-{node_id}`.
+///
/// This key cannot be used to describe the `Metasrv` because the `Metasrv` does not have
/// a `cluster_id`; it serves multiple clusters.
#[derive(Debug, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)]

@@ -78,17 +78,21 @@ pub struct RegionStat {
    /// The write capacity units during this period
    pub wcus: i64,
    /// Approximate bytes of this region
-    pub approximate_bytes: i64,
+    pub approximate_bytes: u64,
    /// The engine name.
    pub engine: String,
    /// The region role.
    pub role: RegionRole,
    /// The number of rows
    pub num_rows: u64,
    /// The size of the memtable in bytes.
    pub memtable_size: u64,
    /// The size of the manifest in bytes.
    pub manifest_size: u64,
-    /// The size of the SST files in bytes.
+    /// The size of the SST data files in bytes.
    pub sst_size: u64,
+    /// The size of the SST index files in bytes.
+    pub index_size: u64,
}

impl Stat {
@@ -178,12 +182,14 @@ impl From<&api::v1::meta::RegionStat> for RegionStat {
            id: RegionId::from_u64(value.region_id),
            rcus: value.rcus,
            wcus: value.wcus,
-            approximate_bytes: value.approximate_bytes,
+            approximate_bytes: value.approximate_bytes as u64,
            engine: value.engine.to_string(),
            role: RegionRole::from(value.role()),
            num_rows: region_stat.num_rows,
            memtable_size: region_stat.memtable_size,
            manifest_size: region_stat.manifest_size,
            sst_size: region_stat.sst_size,
+            index_size: region_stat.index_size,
        }
    }
}

@@ -43,10 +43,10 @@ impl AlterLogicalTablesProcedure {
            &self.data.physical_columns,
        );

-        // Updates physical table's metadata
+        // Updates physical table's metadata, and we don't need to touch per-region settings.
        self.context
            .table_metadata_manager
-            .update_table_info(physical_table_info, new_raw_table_info)
+            .update_table_info(physical_table_info, None, new_raw_table_info)
            .await?;

        Ok(())

@@ -43,10 +43,10 @@ use crate::ddl::DdlContext;
use crate::error::{Error, Result};
use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
-use crate::key::DeserializedValueWithBytes;
+use crate::key::{DeserializedValueWithBytes, RegionDistribution};
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
use crate::rpc::ddl::AlterTableTask;
-use crate::rpc::router::{find_leader_regions, find_leaders};
+use crate::rpc::router::{find_leader_regions, find_leaders, region_distribution};
use crate::{metrics, ClusterId};

/// The alter table procedure
@@ -101,6 +101,9 @@ impl AlterTableProcedure {
            .get_physical_table_route(table_id)
            .await?;

+        self.data.region_distribution =
+            Some(region_distribution(&physical_table_route.region_routes));
+
        let leaders = find_leaders(&physical_table_route.region_routes);
        let mut alter_region_tasks = Vec::with_capacity(leaders.len());

@@ -161,8 +164,14 @@ impl AlterTableProcedure {
            self.on_update_metadata_for_rename(new_table_name.to_string(), table_info_value)
                .await?;
        } else {
-            self.on_update_metadata_for_alter(new_info.into(), table_info_value)
-                .await?;
+            // Region distribution is set in submit_alter_region_requests.
+            let region_distribution = self.data.region_distribution.as_ref().unwrap().clone();
+            self.on_update_metadata_for_alter(
+                new_info.into(),
+                region_distribution,
+                table_info_value,
+            )
+            .await?;
        }

        info!("Updated table metadata for table {table_ref}, table_id: {table_id}");
@@ -271,6 +280,8 @@ pub struct AlterTableData {
    table_id: TableId,
    /// Table info value before alteration.
    table_info_value: Option<DeserializedValueWithBytes<TableInfoValue>>,
+    /// Region distribution for table in case we need to update region options.
+    region_distribution: Option<RegionDistribution>,
}

impl AlterTableData {
@@ -281,6 +292,7 @@ impl AlterTableData {
            table_id,
            cluster_id,
            table_info_value: None,
+            region_distribution: None,
        }
    }
}
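
For orientation, `region_distribution` collapses region routes into a per-datanode region list. A reduced standalone sketch of that shape (primitive stand-ins for `RegionRoute`/`RegionDistribution`, not this crate's real types):

use std::collections::BTreeMap;

// Stand-in pairs: (region number, leader datanode id).
fn region_distribution(routes: &[(u32, u64)]) -> BTreeMap<u64, Vec<u32>> {
    let mut dist: BTreeMap<u64, Vec<u32>> = BTreeMap::new();
    for (region, datanode) in routes {
        dist.entry(*datanode).or_default().push(*region);
    }
    dist
}

fn main() {
    let dist = region_distribution(&[(0, 1), (1, 2), (2, 1)]);
    // Datanode 1 hosts regions [0, 2]; datanode 2 hosts region [1].
    assert_eq!(dist[&1], vec![0, 2]);
}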

@@ -106,6 +106,7 @@ fn create_proto_alter_kind(
            })))
        }
        Kind::RenameTable(_) => Ok(None),
+        Kind::ChangeTableOptions(v) => Ok(Some(alter_request::Kind::ChangeTableOptions(v.clone()))),
    }
}

@@ -20,7 +20,7 @@ use table::requests::AlterKind;
use crate::ddl::alter_table::AlterTableProcedure;
use crate::error::{self, Result};
use crate::key::table_info::TableInfoValue;
-use crate::key::DeserializedValueWithBytes;
+use crate::key::{DeserializedValueWithBytes, RegionDistribution};

impl AlterTableProcedure {
    /// Builds new_meta
@@ -51,7 +51,9 @@ impl AlterTableProcedure {
            AlterKind::RenameTable { new_table_name } => {
                new_info.name = new_table_name.to_string();
            }
-            AlterKind::DropColumns { .. } | AlterKind::ChangeColumnTypes { .. } => {}
+            AlterKind::DropColumns { .. }
+            | AlterKind::ChangeColumnTypes { .. }
+            | AlterKind::ChangeTableOptions { .. } => {}
        }

        Ok(new_info)
@@ -75,11 +77,16 @@ impl AlterTableProcedure {
    pub(crate) async fn on_update_metadata_for_alter(
        &self,
        new_table_info: RawTableInfo,
+        region_distribution: RegionDistribution,
        current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
    ) -> Result<()> {
        let table_metadata_manager = &self.context.table_metadata_manager;
        table_metadata_manager
-            .update_table_info(current_table_info_value, new_table_info)
+            .update_table_info(
+                current_table_info_value,
+                Some(region_distribution),
+                new_table_info,
+            )
            .await?;

        Ok(())

@@ -58,10 +58,10 @@ impl CreateLogicalTablesProcedure {
            &new_table_info.name,
        );

-        // Update physical table's metadata
+        // Update physical table's metadata and we don't need to touch per-region settings.
        self.context
            .table_metadata_manager
-            .update_table_info(&physical_table_info, new_table_info)
+            .update_table_info(&physical_table_info, None, new_table_info)
            .await?;

        // Invalid physical table cache

@@ -29,7 +29,10 @@ use crate::test_util::MockDatanodeHandler;
#[async_trait::async_trait]
impl MockDatanodeHandler for () {
    async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<RegionResponse> {
-        unreachable!()
+        Ok(RegionResponse {
+            affected_rows: 0,
+            extensions: Default::default(),
+        })
    }

    async fn handle_query(

@@ -19,13 +19,14 @@ use std::sync::Arc;
use api::v1::alter_expr::Kind;
use api::v1::region::{region_request, RegionRequest};
use api::v1::{
-    AddColumn, AddColumns, AlterExpr, ColumnDataType, ColumnDef as PbColumnDef, DropColumn,
-    DropColumns, SemanticType,
+    AddColumn, AddColumns, AlterExpr, ChangeTableOption, ChangeTableOptions, ColumnDataType,
+    ColumnDef as PbColumnDef, DropColumn, DropColumns, SemanticType,
};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use store_api::storage::RegionId;
+use table::requests::TTL_KEY;
use tokio::sync::mpsc::{self};

use crate::ddl::alter_table::AlterTableProcedure;
@@ -34,6 +35,7 @@ use crate::ddl::test_util::create_table::test_create_table_task;
use crate::ddl::test_util::datanode_handler::{
    DatanodeWatcher, RequestOutdatedErrorDatanodeHandler,
};
+use crate::key::datanode_table::DatanodeTableKey;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::peer::Peer;
@@ -293,12 +295,21 @@ async fn test_on_update_metadata_add_columns() {
    let table_name = "foo";
    let table_id = 1024;
    let task = test_create_table_task(table_name, table_id);
+
+    let region_id = RegionId::new(table_id, 0);
+    let mock_table_routes = vec![RegionRoute {
+        region: Region::new_test(region_id),
+        leader_peer: Some(Peer::default()),
+        follower_peers: vec![],
+        leader_state: None,
+        leader_down_since: None,
+    }];
    // Puts a value to table name key.
    ddl_context
        .table_metadata_manager
        .create_table_metadata(
            task.table_info.clone(),
-            TableRouteValue::physical(vec![]),
+            TableRouteValue::physical(mock_table_routes),
            HashMap::new(),
        )
        .await
@@ -326,6 +337,7 @@ async fn test_on_update_metadata_add_columns() {
    let mut procedure =
        AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
    procedure.on_prepare().await.unwrap();
+    procedure.submit_alter_region_requests().await.unwrap();
    procedure.on_update_metadata().await.unwrap();

    let table_info = ddl_context
@@ -343,3 +355,76 @@ async fn test_on_update_metadata_add_columns() {
        table_info.meta.next_column_id
    );
}

+#[tokio::test]
+async fn test_on_update_table_options() {
+    let node_manager = Arc::new(MockDatanodeManager::new(()));
+    let ddl_context = new_ddl_context(node_manager);
+    let cluster_id = 1;
+    let table_name = "foo";
+    let table_id = 1024;
+    let task = test_create_table_task(table_name, table_id);
+
+    let region_id = RegionId::new(table_id, 0);
+    let mock_table_routes = vec![RegionRoute {
+        region: Region::new_test(region_id),
+        leader_peer: Some(Peer::default()),
+        follower_peers: vec![],
+        leader_state: None,
+        leader_down_since: None,
+    }];
+    // Puts a value to table name key.
+    ddl_context
+        .table_metadata_manager
+        .create_table_metadata(
+            task.table_info.clone(),
+            TableRouteValue::physical(mock_table_routes),
+            HashMap::new(),
+        )
+        .await
+        .unwrap();
+
+    let task = AlterTableTask {
+        alter_table: AlterExpr {
+            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
+            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
+            table_name: table_name.to_string(),
+            kind: Some(Kind::ChangeTableOptions(ChangeTableOptions {
+                change_table_options: vec![ChangeTableOption {
+                    key: TTL_KEY.to_string(),
+                    value: "1d".to_string(),
+                }],
+            })),
+        },
+    };
+    let mut procedure =
+        AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
+    procedure.on_prepare().await.unwrap();
+    procedure.submit_alter_region_requests().await.unwrap();
+    procedure.on_update_metadata().await.unwrap();
+
+    let table_info = ddl_context
+        .table_metadata_manager
+        .table_info_manager()
+        .get(table_id)
+        .await
+        .unwrap()
+        .unwrap()
+        .into_inner()
+        .table_info;
+
+    let datanode_key = DatanodeTableKey::new(0, table_id);
+    let region_info = ddl_context
+        .table_metadata_manager
+        .datanode_table_manager()
+        .get(&datanode_key)
+        .await
+        .unwrap()
+        .unwrap()
+        .region_info;
+
+    assert_eq!(
+        region_info.region_options,
+        HashMap::from(&table_info.meta.options)
+    );
+}

@@ -18,6 +18,7 @@ use common_procedure::error::Error as ProcedureError;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::metric_engine_consts::LOGICAL_TABLE_METADATA_KEY;
use table::metadata::TableId;
+use table::table_reference::TableReference;

use crate::ddl::DetectingRegion;
use crate::error::{Error, OperateDatanodeSnafu, Result, TableNotFoundSnafu, UnsupportedSnafu};
@@ -109,8 +110,8 @@ pub async fn check_and_get_physical_table_id(
        .table_name_manager()
        .get(physical_table_name)
        .await?
-        .context(TableNotFoundSnafu {
-            table_name: physical_table_name.to_string(),
+        .with_context(|| TableNotFoundSnafu {
+            table_name: TableReference::from(physical_table_name).to_string(),
        })
        .map(|table| table.table_id())
}
@@ -123,8 +124,8 @@ pub async fn get_physical_table_id(
        .table_name_manager()
        .get(logical_table_name)
        .await?
-        .context(TableNotFoundSnafu {
-            table_name: logical_table_name.to_string(),
+        .with_context(|| TableNotFoundSnafu {
+            table_name: TableReference::from(logical_table_name).to_string(),
        })
        .map(|table| table.table_id())?;

@@ -147,6 +147,20 @@ pub enum Error {
        source: common_procedure::Error,
    },

+    #[snafu(display("Failed to start procedure manager"))]
+    StartProcedureManager {
+        #[snafu(implicit)]
+        location: Location,
+        source: common_procedure::Error,
+    },
+
+    #[snafu(display("Failed to stop procedure manager"))]
+    StopProcedureManager {
+        #[snafu(implicit)]
+        location: Location,
+        source: common_procedure::Error,
+    },
+
    #[snafu(display(
        "Failed to get procedure output, procedure id: {procedure_id}, error: {err_msg}"
    ))]
@@ -638,6 +652,18 @@ pub enum Error {
        #[snafu(implicit)]
        location: Location,
    },
+
+    #[snafu(display(
+        "Datanode table info not found, table id: {}, datanode id: {}",
+        table_id,
+        datanode_id
+    ))]
+    DatanodeTableInfoNotFound {
+        datanode_id: DatanodeId,
+        table_id: TableId,
+        #[snafu(implicit)]
+        location: Location,
+    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -715,7 +741,9 @@ impl ErrorExt for Error {

            SubmitProcedure { source, .. }
            | QueryProcedure { source, .. }
-            | WaitProcedure { source, .. } => source.status_code(),
+            | WaitProcedure { source, .. }
+            | StartProcedureManager { source, .. }
+            | StopProcedureManager { source, .. } => source.status_code(),
            RegisterProcedureLoader { source, .. } => source.status_code(),
            External { source, .. } => source.status_code(),
            OperateDatanode { source, .. } => source.status_code(),
@@ -736,6 +764,7 @@ impl ErrorExt for Error {
            PostgresExecution { .. } => StatusCode::Internal,
            #[cfg(feature = "pg_kvbackend")]
            ConnectPostgres { .. } => StatusCode::Internal,
+            Error::DatanodeTableInfoNotFound { .. } => StatusCode::Internal,
        }
    }

@@ -133,7 +133,6 @@ use self::flow::flow_name::FlowNameValue;
use self::schema_name::{SchemaManager, SchemaNameKey, SchemaNameValue};
use self::table_route::{TableRouteManager, TableRouteValue};
use self::tombstone::TombstoneManager;
-use crate::ddl::utils::region_storage_path;
use crate::error::{self, Result, SerdeJsonSnafu};
use crate::key::node_address::NodeAddressValue;
use crate::key::table_route::TableRouteKey;
@@ -593,8 +592,6 @@ impl TableMetadataManager {
        table_info.meta.region_numbers = region_numbers;
        let table_id = table_info.ident.table_id;
        let engine = table_info.meta.engine.clone();
-        let region_storage_path =
-            region_storage_path(&table_info.catalog_name, &table_info.schema_name);

        // Creates table name.
        let table_name = TableNameKey::new(
@@ -606,7 +603,7 @@ impl TableMetadataManager {
            .table_name_manager()
            .build_create_txn(&table_name, table_id)?;

-        let region_options = (&table_info.meta.options).into();
+        let region_options = table_info.to_region_options();
        // Creates table info.
        let table_info_value = TableInfoValue::new(table_info);
        let (create_table_info_txn, on_create_table_info_failure) = self
@@ -625,6 +622,7 @@ impl TableMetadataManager {
        ]);

        if let TableRouteValue::Physical(x) = &table_route_value {
+            let region_storage_path = table_info_value.region_storage_path();
            let create_datanode_table_txn = self.datanode_table_manager().build_create_txn(
                table_id,
                &engine,
|
||||
@@ -926,13 +924,15 @@ impl TableMetadataManager {
|
||||
}
|
||||
|
||||
/// Updates table info and returns an error if different metadata exists.
|
||||
/// And cascade-ly update all redundant table options for each region
|
||||
/// if region_distribution is present.
|
||||
pub async fn update_table_info(
|
||||
&self,
|
||||
current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
|
||||
region_distribution: Option<RegionDistribution>,
|
||||
new_table_info: RawTableInfo,
|
||||
) -> Result<()> {
|
||||
let table_id = current_table_info_value.table_info.ident.table_id;
|
||||
|
||||
let new_table_info_value = current_table_info_value.update(new_table_info);
|
||||
|
||||
// Updates table info.
|
||||
@@ -940,8 +940,19 @@ impl TableMetadataManager {
|
||||
.table_info_manager()
|
||||
.build_update_txn(table_id, current_table_info_value, &new_table_info_value)?;
|
||||
|
||||
let mut r = self.kv_backend.txn(update_table_info_txn).await?;
|
||||
let txn = if let Some(region_distribution) = region_distribution {
|
||||
// region options induced from table info.
|
||||
let new_region_options = new_table_info_value.table_info.to_region_options();
|
||||
let update_datanode_table_options_txn = self
|
||||
.datanode_table_manager
|
||||
.build_update_table_options_txn(table_id, region_distribution, new_region_options)
|
||||
.await?;
|
||||
Txn::merge_all([update_table_info_txn, update_datanode_table_options_txn])
|
||||
} else {
|
||||
update_table_info_txn
|
||||
};
|
||||
|
||||
let mut r = self.kv_backend.txn(txn).await?;
|
||||
// Checks whether metadata was already updated.
|
||||
if !r.succeeded {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
@@ -1669,12 +1680,12 @@ mod tests {
|
||||
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
|
||||
// should be ok.
|
||||
table_metadata_manager
|
||||
.update_table_info(¤t_table_info_value, new_table_info.clone())
|
||||
.update_table_info(¤t_table_info_value, None, new_table_info.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
// if table info was updated, it should be ok.
|
||||
table_metadata_manager
|
||||
.update_table_info(¤t_table_info_value, new_table_info.clone())
|
||||
.update_table_info(¤t_table_info_value, None, new_table_info.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -1696,7 +1707,7 @@ mod tests {
|
||||
// if the current_table_info_value is wrong, it should return an error.
|
||||
// The ABA problem.
|
||||
assert!(table_metadata_manager
|
||||
.update_table_info(&wrong_table_info_value, new_table_info)
|
||||
.update_table_info(&wrong_table_info_value, None, new_table_info)
|
||||
.await
|
||||
.is_err())
|
||||
}
|
||||
|
||||
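The branch above is the heart of the change: when a `region_distribution` is supplied, the table-info update and the per-datanode option rewrites are merged into one transaction so they commit atomically. A toy sketch of that control flow (the `Txn` type here is a stand-in, not the real `common_meta` API):

```rust
// Toy Txn: just a list of operations, merged by concatenation.
#[derive(Debug)]
struct Txn(Vec<String>);

impl Txn {
    fn merge_all<I: IntoIterator<Item = Txn>>(txns: I) -> Txn {
        Txn(txns.into_iter().flat_map(|t| t.0).collect())
    }
}

// Mirrors update_table_info: bundle the option rewrite with the
// info update only when a region distribution is supplied.
fn plan(update_info: Txn, options: Option<Txn>) -> Txn {
    match options {
        Some(opts) => Txn::merge_all([update_info, opts]),
        None => update_info,
    }
}

fn main() {
    let single = plan(Txn(vec!["put table_info".into()]), None);
    assert_eq!(single.0.len(), 1);

    let merged = plan(
        Txn(vec!["put table_info".into()]),
        Some(Txn(vec!["put dn_table_1".into(), "put dn_table_2".into()])),
    );
    assert_eq!(merged.0.len(), 3);
    println!("merged ops: {:?}", merged.0);
}
```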
@@ -35,7 +35,7 @@ pub struct CatalogNameKey<'a> {
     pub catalog: &'a str,
 }
 
-impl<'a> Default for CatalogNameKey<'a> {
+impl Default for CatalogNameKey<'_> {
     fn default() -> Self {
         Self {
             catalog: DEFAULT_CATALOG_NAME,
 
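This lifetime change (repeated in several hunks below) is mechanical: when the named lifetime is never referenced in the impl body, the anonymous lifetime `'_` suffices and the explicit `impl<'a>` parameter can be dropped. A minimal illustration:

```rust
struct Key<'a> {
    name: &'a str,
}

// Before: impl<'a> Default for Key<'a> { ... }
// After: the lifetime is unused in the body, so '_ is enough.
impl Default for Key<'_> {
    fn default() -> Self {
        // &'static str coerces to &'_ str.
        Key { name: "default" }
    }
}

fn main() {
    assert_eq!(Key::default().name, "default");
}
```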
@@ -23,7 +23,7 @@ use store_api::storage::RegionNumber;
 use table::metadata::TableId;
 
 use super::MetadataKey;
-use crate::error::{InvalidMetadataSnafu, Result};
+use crate::error::{DatanodeTableInfoNotFoundSnafu, InvalidMetadataSnafu, Result};
 use crate::key::{
     MetadataValue, RegionDistribution, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX,
 };
@@ -77,7 +77,7 @@ impl DatanodeTableKey {
     }
 }
 
-impl<'a> MetadataKey<'a, DatanodeTableKey> for DatanodeTableKey {
+impl MetadataKey<'_, DatanodeTableKey> for DatanodeTableKey {
     fn to_bytes(&self) -> Vec<u8> {
         self.to_string().into_bytes()
     }
@@ -209,6 +209,49 @@ impl DatanodeTableManager {
         Ok(txn)
     }
 
+    /// Builds a transaction to update the redundant table options (including WAL options)
+    /// for the given table id, if provided.
+    ///
+    /// Note that the provided `new_region_options` must be a
+    /// complete set of all options rather than incremental changes.
+    pub(crate) async fn build_update_table_options_txn(
+        &self,
+        table_id: TableId,
+        region_distribution: RegionDistribution,
+        new_region_options: HashMap<String, String>,
+    ) -> Result<Txn> {
+        assert!(!region_distribution.is_empty());
+        // Safety: region_distribution must not be empty.
+        let (any_datanode, _) = region_distribution.first_key_value().unwrap();
+
+        let mut region_info = self
+            .kv_backend
+            .get(&DatanodeTableKey::new(*any_datanode, table_id).to_bytes())
+            .await
+            .transpose()
+            .context(DatanodeTableInfoNotFoundSnafu {
+                datanode_id: *any_datanode,
+                table_id,
+            })?
+            .and_then(|r| DatanodeTableValue::try_from_raw_value(&r.value))?
+            .region_info;
+        // Substitute region options only.
+        region_info.region_options = new_region_options;
+
+        let mut txns = Vec::with_capacity(region_distribution.len());
+
+        for (datanode, regions) in region_distribution.into_iter() {
+            let key = DatanodeTableKey::new(datanode, table_id);
+            let key_bytes = key.to_bytes();
+            let value_bytes = DatanodeTableValue::new(table_id, regions, region_info.clone())
+                .try_as_raw_value()?;
+            txns.push(TxnOp::Put(key_bytes, value_bytes));
+        }
+
+        let txn = Txn::new().and_then(txns);
+        Ok(txn)
+    }
+
     /// Builds the update datanode table transactions. It only executes when the primary-key comparison succeeds.
     pub(crate) fn build_update_txn(
         &self,
 
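`build_update_table_options_txn` reads the `region_info` from one representative datanode key, swaps in the new options, and writes the resulting value back under every datanode's key. A self-contained sketch of that fan-out (types and field names are illustrative, not the real `common_meta` types):

```rust
use std::collections::BTreeMap;

#[derive(Clone, Debug, PartialEq)]
struct RegionInfo {
    region_options: BTreeMap<String, String>,
}

// Read one representative value, substitute the options, then emit one
// put per datanode so all copies stay consistent.
fn build_puts(
    region_distribution: &BTreeMap<u64, Vec<u32>>, // datanode id -> regions
    mut region_info: RegionInfo,                   // read from any one datanode
    new_options: BTreeMap<String, String>,
) -> Vec<(u64, RegionInfo)> {
    assert!(!region_distribution.is_empty());
    // Substitute region options only; everything else is carried over.
    region_info.region_options = new_options;
    region_distribution
        .keys()
        .map(|datanode| (*datanode, region_info.clone()))
        .collect()
}

fn main() {
    let mut dist = BTreeMap::new();
    dist.insert(1u64, vec![0, 1]);
    dist.insert(2u64, vec![2]);
    let info = RegionInfo { region_options: BTreeMap::new() };
    let mut opts = BTreeMap::new();
    opts.insert("ttl".to_string(), "7d".to_string());
    let puts = build_puts(&dist, info, opts.clone());
    assert_eq!(puts.len(), 2);
    assert!(puts.iter().all(|(_, v)| v.region_options == opts));
}
```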
@@ -42,6 +42,8 @@ lazy_static! {
 /// The layout: `__flow/info/{flow_id}`.
 pub struct FlowInfoKey(FlowScoped<FlowInfoKeyInner>);
 
+pub type FlowInfoDecodeResult = Result<Option<DeserializedValueWithBytes<FlowInfoValue>>>;
+
 impl<'a> MetadataKey<'a, FlowInfoKey> for FlowInfoKey {
     fn to_bytes(&self) -> Vec<u8> {
         self.0.to_bytes()
@@ -203,9 +205,7 @@ impl FlowInfoManager {
         flow_value: &FlowInfoValue,
     ) -> Result<(
         Txn,
-        impl FnOnce(
-            &mut TxnOpGetResponseSet,
-        ) -> Result<Option<DeserializedValueWithBytes<FlowInfoValue>>>,
+        impl FnOnce(&mut TxnOpGetResponseSet) -> FlowInfoDecodeResult,
     )> {
         let key = FlowInfoKey::new(flow_id).to_bytes();
         let txn = Txn::put_if_not_exists(key.clone(), flow_value.try_as_raw_value()?);
 
@@ -46,6 +46,8 @@ lazy_static! {
 /// The layout: `__flow/name/{catalog_name}/{flow_name}`.
 pub struct FlowNameKey<'a>(FlowScoped<FlowNameKeyInner<'a>>);
 
+pub type FlowNameDecodeResult = Result<Option<DeserializedValueWithBytes<FlowNameValue>>>;
+
 #[allow(dead_code)]
 impl<'a> FlowNameKey<'a> {
     /// Returns the [FlowNameKey]
@@ -104,7 +106,7 @@ impl<'a> MetadataKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
         .into_bytes()
     }
 
-    fn from_bytes(bytes: &'a [u8]) -> Result<FlowNameKeyInner> {
+    fn from_bytes(bytes: &'a [u8]) -> Result<FlowNameKeyInner<'a>> {
         let key = std::str::from_utf8(bytes).map_err(|e| {
             error::InvalidMetadataSnafu {
                 err_msg: format!(
@@ -223,9 +225,7 @@ impl FlowNameManager {
         flow_id: FlowId,
     ) -> Result<(
         Txn,
-        impl FnOnce(
-            &mut TxnOpGetResponseSet,
-        ) -> Result<Option<DeserializedValueWithBytes<FlowNameValue>>>,
+        impl FnOnce(&mut TxnOpGetResponseSet) -> FlowNameDecodeResult,
     )> {
         let key = FlowNameKey::new(catalog_name, flow_name);
         let raw_key = key.to_bytes();
 
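The `FlowInfoDecodeResult` and `FlowNameDecodeResult` aliases exist purely to shorten the multi-line `impl FnOnce` return types above; the same refactor recurs for table info, table routes, and view info below. A generic, runnable sketch of the idea (toy types; `String` stands in for the crate's error type):

```rust
// One alias replaces the long inline return type.
type DecodeResult<T> = Result<Option<T>, String>;

// Before:
//   fn build_txn() -> (Vec<u8>, impl FnOnce(&mut Vec<u8>) -> Result<Option<String>, String>)
// After:
fn build_txn() -> (Vec<u8>, impl FnOnce(&mut Vec<u8>) -> DecodeResult<String>) {
    let key = b"__flow/info/1".to_vec();
    (key, |raw: &mut Vec<u8>| {
        // Decode the raw response bytes into the typed value.
        String::from_utf8(std::mem::take(raw))
            .map(Some)
            .map_err(|e| e.to_string())
    })
}

fn main() {
    let (key, decode) = build_txn();
    let mut resp = key.clone();
    assert_eq!(decode(&mut resp).unwrap(), Some("__flow/info/1".to_string()));
}
```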
@@ -52,7 +52,7 @@ impl NodeAddressValue {
     }
 }
 
-impl<'a> MetadataKey<'a, NodeAddressKey> for NodeAddressKey {
+impl MetadataKey<'_, NodeAddressKey> for NodeAddressKey {
     fn to_bytes(&self) -> Vec<u8> {
         self.to_string().into_bytes()
     }
 
@@ -41,7 +41,7 @@ pub struct SchemaNameKey<'a> {
     pub schema: &'a str,
 }
 
-impl<'a> Default for SchemaNameKey<'a> {
+impl Default for SchemaNameKey<'_> {
     fn default() -> Self {
         Self {
             catalog: DEFAULT_CATALOG_NAME,
 
@@ -23,6 +23,7 @@ use table::table_name::TableName;
 use table::table_reference::TableReference;
 
 use super::TABLE_INFO_KEY_PATTERN;
+use crate::ddl::utils::region_storage_path;
 use crate::error::{InvalidMetadataSnafu, Result};
 use crate::key::txn_helper::TxnOpGetResponseSet;
 use crate::key::{DeserializedValueWithBytes, MetadataKey, MetadataValue, TABLE_INFO_KEY_PREFIX};
@@ -51,7 +52,7 @@ impl Display for TableInfoKey {
     }
 }
 
-impl<'a> MetadataKey<'a, TableInfoKey> for TableInfoKey {
+impl MetadataKey<'_, TableInfoKey> for TableInfoKey {
     fn to_bytes(&self) -> Vec<u8> {
         self.to_string().into_bytes()
     }
@@ -125,6 +126,11 @@ impl TableInfoValue {
             table_name: self.table_info.name.to_string(),
         }
     }
+
+    /// Builds the storage path for all regions in the table.
+    pub fn region_storage_path(&self) -> String {
+        region_storage_path(&self.table_info.catalog_name, &self.table_info.schema_name)
+    }
 }
 
 pub type TableInfoManagerRef = Arc<TableInfoManager>;
@@ -132,6 +138,7 @@ pub type TableInfoManagerRef = Arc<TableInfoManager>;
 pub struct TableInfoManager {
     kv_backend: KvBackendRef,
 }
+pub type TableInfoDecodeResult = Result<Option<DeserializedValueWithBytes<TableInfoValue>>>;
 
 impl TableInfoManager {
     pub fn new(kv_backend: KvBackendRef) -> Self {
@@ -145,9 +152,7 @@ impl TableInfoManager {
         table_info_value: &TableInfoValue,
     ) -> Result<(
         Txn,
-        impl FnOnce(
-            &mut TxnOpGetResponseSet,
-        ) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
+        impl FnOnce(&mut TxnOpGetResponseSet) -> TableInfoDecodeResult,
     )> {
         let key = TableInfoKey::new(table_id);
         let raw_key = key.to_bytes();
@@ -169,9 +174,7 @@ impl TableInfoManager {
         new_table_info_value: &TableInfoValue,
     ) -> Result<(
         Txn,
-        impl FnOnce(
-            &mut TxnOpGetResponseSet,
-        ) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
+        impl FnOnce(&mut TxnOpGetResponseSet) -> TableInfoDecodeResult,
     )> {
         let key = TableInfoKey::new(table_id);
         let raw_key = key.to_bytes();
 
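With the new method, call sites derive the path from the value itself instead of destructuring catalog and schema by hand. A sketch of the shape of that change; the types are abbreviated stand-ins, and the `"<catalog>/<schema>"` layout is an assumption (the real helper lives in `crate::ddl::utils`):

```rust
// Illustrative stand-ins; the real types live in src/common/meta.
struct TableInfo {
    catalog_name: String,
    schema_name: String,
}

struct TableInfoValue {
    table_info: TableInfo,
}

// Free function, as previously called from TableMetadataManager.
fn region_storage_path(catalog: &str, schema: &str) -> String {
    // Assumed layout: "<catalog>/<schema>".
    format!("{catalog}/{schema}")
}

impl TableInfoValue {
    /// Builds the storage path for all regions in the table.
    fn region_storage_path(&self) -> String {
        region_storage_path(&self.table_info.catalog_name, &self.table_info.schema_name)
    }
}

fn main() {
    let v = TableInfoValue {
        table_info: TableInfo {
            catalog_name: "greptime".into(),
            schema_name: "public".into(),
        },
    };
    // After the change: the value itself knows its storage path.
    println!("{}", v.region_storage_path());
}
```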
@@ -21,6 +21,7 @@ use serde::{Deserialize, Serialize};
 use snafu::OptionExt;
 use table::metadata::TableId;
 use table::table_name::TableName;
+use table::table_reference::TableReference;
 
 use super::{MetadataKey, MetadataValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
 use crate::error::{Error, InvalidMetadataSnafu, Result};
@@ -122,6 +123,16 @@ impl From<TableNameKey<'_>> for TableName {
     }
 }
 
+impl<'a> From<TableNameKey<'a>> for TableReference<'a> {
+    fn from(value: TableNameKey<'a>) -> Self {
+        Self {
+            catalog: value.catalog,
+            schema: value.schema,
+            table: value.table,
+        }
+    }
+}
+
 impl<'a> TryFrom<&'a str> for TableNameKey<'a> {
     type Error = Error;
 
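The new `From` impl is a field-for-field borrow that preserves the key's lifetime, so no allocation happens in the conversion. Reduced to its essentials (struct definitions abbreviated from the real `table` crate types):

```rust
struct TableNameKey<'a> {
    catalog: &'a str,
    schema: &'a str,
    table: &'a str,
}

struct TableReference<'a> {
    catalog: &'a str,
    schema: &'a str,
    table: &'a str,
}

// Borrow-preserving conversion: the reference lives as long as the key.
impl<'a> From<TableNameKey<'a>> for TableReference<'a> {
    fn from(value: TableNameKey<'a>) -> Self {
        Self {
            catalog: value.catalog,
            schema: value.schema,
            table: value.table,
        }
    }
}

fn main() {
    let key = TableNameKey { catalog: "greptime", schema: "public", table: "metrics" };
    let r: TableReference<'_> = key.into();
    assert_eq!((r.catalog, r.schema, r.table), ("greptime", "public", "metrics"));
}
```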
@@ -245,7 +245,7 @@ impl LogicalTableRouteValue {
     }
 }
 
-impl<'a> MetadataKey<'a, TableRouteKey> for TableRouteKey {
+impl MetadataKey<'_, TableRouteKey> for TableRouteKey {
     fn to_bytes(&self) -> Vec<u8> {
         self.to_string().into_bytes()
     }
@@ -472,6 +472,8 @@ pub struct TableRouteStorage {
     kv_backend: KvBackendRef,
 }
 
+pub type TableRouteValueDecodeResult = Result<Option<DeserializedValueWithBytes<TableRouteValue>>>;
+
 impl TableRouteStorage {
     pub fn new(kv_backend: KvBackendRef) -> Self {
         Self { kv_backend }
@@ -485,9 +487,7 @@ impl TableRouteStorage {
         table_route_value: &TableRouteValue,
     ) -> Result<(
         Txn,
-        impl FnOnce(
-            &mut TxnOpGetResponseSet,
-        ) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
+        impl FnOnce(&mut TxnOpGetResponseSet) -> TableRouteValueDecodeResult,
     )> {
         let key = TableRouteKey::new(table_id);
         let raw_key = key.to_bytes();
@@ -510,9 +510,7 @@ impl TableRouteStorage {
         new_table_route_value: &TableRouteValue,
     ) -> Result<(
         Txn,
-        impl FnOnce(
-            &mut TxnOpGetResponseSet,
-        ) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
+        impl FnOnce(&mut TxnOpGetResponseSet) -> TableRouteValueDecodeResult,
     )> {
         let key = TableRouteKey::new(table_id);
         let raw_key = key.to_bytes();
 
@@ -53,7 +53,7 @@ impl Display for ViewInfoKey {
     }
 }
 
-impl<'a> MetadataKey<'a, ViewInfoKey> for ViewInfoKey {
+impl MetadataKey<'_, ViewInfoKey> for ViewInfoKey {
     fn to_bytes(&self) -> Vec<u8> {
         self.to_string().into_bytes()
     }
@@ -139,6 +139,8 @@ pub struct ViewInfoManager {
 
 pub type ViewInfoManagerRef = Arc<ViewInfoManager>;
 
+pub type ViewInfoValueDecodeResult = Result<Option<DeserializedValueWithBytes<ViewInfoValue>>>;
+
 impl ViewInfoManager {
     pub fn new(kv_backend: KvBackendRef) -> Self {
         Self { kv_backend }
@@ -151,9 +153,7 @@ impl ViewInfoManager {
         view_info_value: &ViewInfoValue,
     ) -> Result<(
         Txn,
-        impl FnOnce(
-            &mut TxnOpGetResponseSet,
-        ) -> Result<Option<DeserializedValueWithBytes<ViewInfoValue>>>,
+        impl FnOnce(&mut TxnOpGetResponseSet) -> ViewInfoValueDecodeResult,
     )> {
         let key = ViewInfoKey::new(view_id);
         let raw_key = key.to_bytes();
@@ -175,9 +175,7 @@ impl ViewInfoManager {
         new_view_info_value: &ViewInfoValue,
     ) -> Result<(
         Txn,
-        impl FnOnce(
-            &mut TxnOpGetResponseSet,
-        ) -> Result<Option<DeserializedValueWithBytes<ViewInfoValue>>>,
+        impl FnOnce(&mut TxnOpGetResponseSet) -> ViewInfoValueDecodeResult,
     )> {
         let key = ViewInfoKey::new(view_id);
         let raw_key = key.to_bytes();
 
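All of the `build_*_txn` methods touched above share one shape: they return the transaction together with an `FnOnce` that, when the compare step fails, decodes the currently stored value out of the transaction's get responses. A reduced model of that pair (all types here are stand-ins for the real `Txn` and `TxnOpGetResponseSet`):

```rust
use std::collections::HashMap;

struct Txn {
    key: String,
}

type ResponseSet = HashMap<String, Vec<u8>>;

fn build_update_txn(key: String) -> (Txn, impl FnOnce(&mut ResponseSet) -> Option<String>) {
    let txn = Txn { key: key.clone() };
    // If the CAS fails, the caller uses this closure to pull the
    // currently stored value out of the txn's get responses.
    let decode =
        move |set: &mut ResponseSet| set.remove(&key).and_then(|v| String::from_utf8(v).ok());
    (txn, decode)
}

fn main() {
    let (txn, on_failure) = build_update_txn("__table_info/42".to_string());
    let mut responses = ResponseSet::new();
    responses.insert(txn.key.clone(), b"current value".to_vec());
    // Pretend the compare step failed; decode what is stored now.
    assert_eq!(on_failure(&mut responses), Some("current value".to_string()));
}
```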
src/common/meta/src/leadership_notifier.rs (new file, 183 lines)
@@ -0,0 +1,183 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::{Arc, Mutex};
+
+use async_trait::async_trait;
+use common_telemetry::{error, info};
+
+use crate::error::Result;
+
+pub type LeadershipChangeNotifierCustomizerRef = Arc<dyn LeadershipChangeNotifierCustomizer>;
+
+/// A trait for customizing the leadership change notifier.
+pub trait LeadershipChangeNotifierCustomizer: Send + Sync {
+    fn customize(&self, notifier: &mut LeadershipChangeNotifier);
+
+    fn add_listener(&self, listener: Arc<dyn LeadershipChangeListener>);
+}
+
+/// A trait for handling leadership change events in a distributed system.
+#[async_trait]
+pub trait LeadershipChangeListener: Send + Sync {
+    /// Returns the listener name.
+    fn name(&self) -> &str;
+
+    /// Called when the node transitions to the leader role.
+    async fn on_leader_start(&self) -> Result<()>;
+
+    /// Called when the node transitions to the follower role.
+    async fn on_leader_stop(&self) -> Result<()>;
+}
+
+/// A notifier for leadership change events.
+#[derive(Default)]
+pub struct LeadershipChangeNotifier {
+    listeners: Vec<Arc<dyn LeadershipChangeListener>>,
+}
+
+#[derive(Default)]
+pub struct DefaultLeadershipChangeNotifierCustomizer {
+    listeners: Mutex<Vec<Arc<dyn LeadershipChangeListener>>>,
+}
+
+impl DefaultLeadershipChangeNotifierCustomizer {
+    pub fn new() -> Self {
+        Self {
+            listeners: Mutex::new(Vec::new()),
+        }
+    }
+}
+
+impl LeadershipChangeNotifierCustomizer for DefaultLeadershipChangeNotifierCustomizer {
+    fn customize(&self, notifier: &mut LeadershipChangeNotifier) {
+        info!("Customizing leadership change notifier");
+        let listeners = self.listeners.lock().unwrap().clone();
+        notifier.listeners.extend(listeners);
+    }
+
+    fn add_listener(&self, listener: Arc<dyn LeadershipChangeListener>) {
+        self.listeners.lock().unwrap().push(listener);
+    }
+}
+
+impl LeadershipChangeNotifier {
+    /// Adds a listener to the notifier.
+    pub fn add_listener(&mut self, listener: Arc<dyn LeadershipChangeListener>) {
+        self.listeners.push(listener);
+    }
+
+    /// Notifies all listeners that the node has become the leader.
+    pub async fn notify_on_leader_start(&self) {
+        for listener in &self.listeners {
+            if let Err(err) = listener.on_leader_start().await {
+                error!(
+                    err;
+                    "Failed to notify listener: {}, event: 'on_leader_start'",
+                    listener.name()
+                );
+            }
+        }
+    }
+
+    /// Notifies all listeners that the node has become a follower.
+    pub async fn notify_on_leader_stop(&self) {
+        for listener in &self.listeners {
+            if let Err(err) = listener.on_leader_stop().await {
+                error!(
+                    err;
+                    "Failed to notify listener: {}, event: 'on_leader_stop'",
+                    listener.name()
+                );
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::sync::atomic::{AtomicBool, Ordering};
+    use std::sync::Arc;
+
+    use super::*;
+
+    struct MockListener {
+        name: String,
+        on_leader_start_fn: Option<Box<dyn Fn() -> Result<()> + Send + Sync>>,
+        on_follower_start_fn: Option<Box<dyn Fn() -> Result<()> + Send + Sync>>,
+    }
+
+    #[async_trait::async_trait]
+    impl LeadershipChangeListener for MockListener {
+        fn name(&self) -> &str {
+            &self.name
+        }
+
+        async fn on_leader_start(&self) -> Result<()> {
+            if let Some(f) = &self.on_leader_start_fn {
+                return f();
+            }
+            Ok(())
+        }
+
+        async fn on_leader_stop(&self) -> Result<()> {
+            if let Some(f) = &self.on_follower_start_fn {
+                return f();
+            }
+            Ok(())
+        }
+    }
+
+    #[tokio::test]
+    async fn test_leadership_change_notifier() {
+        let mut notifier = LeadershipChangeNotifier::default();
+        let listener1 = Arc::new(MockListener {
+            name: "listener1".to_string(),
+            on_leader_start_fn: None,
+            on_follower_start_fn: None,
+        });
+        let called_on_leader_start = Arc::new(AtomicBool::new(false));
+        let called_on_follower_start = Arc::new(AtomicBool::new(false));
+        let called_on_leader_start_moved = called_on_leader_start.clone();
+        let called_on_follower_start_moved = called_on_follower_start.clone();
+        let listener2 = Arc::new(MockListener {
+            name: "listener2".to_string(),
+            on_leader_start_fn: Some(Box::new(move || {
+                called_on_leader_start_moved.store(true, Ordering::Relaxed);
+                Ok(())
+            })),
+            on_follower_start_fn: Some(Box::new(move || {
+                called_on_follower_start_moved.store(true, Ordering::Relaxed);
+                Ok(())
+            })),
+        });
+
+        notifier.add_listener(listener1);
+        notifier.add_listener(listener2);
+
+        let listener1 = notifier.listeners.first().unwrap();
+        let listener2 = notifier.listeners.get(1).unwrap();
+
+        assert_eq!(listener1.name(), "listener1");
+        assert_eq!(listener2.name(), "listener2");
+
+        notifier.notify_on_leader_start().await;
+        assert!(!called_on_follower_start.load(Ordering::Relaxed));
+        assert!(called_on_leader_start.load(Ordering::Relaxed));
+
+        notifier.notify_on_leader_stop().await;
+        assert!(called_on_follower_start.load(Ordering::Relaxed));
+        assert!(called_on_leader_start.load(Ordering::Relaxed));
+    }
+}
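A sketch of how the new notifier is meant to be driven. The trait and notifier are reduced to their essentials here, error handling is elided, and the `ProcedureManagerListener` name and the call sites are hypothetical (assumes the `async-trait` and `tokio` crates):

```rust
use std::sync::Arc;

use async_trait::async_trait;

#[async_trait]
trait LeadershipChangeListener: Send + Sync {
    fn name(&self) -> &str;
    async fn on_leader_start(&self);
    async fn on_leader_stop(&self);
}

#[derive(Default)]
struct LeadershipChangeNotifier {
    listeners: Vec<Arc<dyn LeadershipChangeListener>>,
}

impl LeadershipChangeNotifier {
    fn add_listener(&mut self, l: Arc<dyn LeadershipChangeListener>) {
        self.listeners.push(l);
    }
    async fn notify_on_leader_start(&self) {
        for l in &self.listeners {
            l.on_leader_start().await;
        }
    }
    async fn notify_on_leader_stop(&self) {
        for l in &self.listeners {
            l.on_leader_stop().await;
        }
    }
}

// Hypothetical listener: ties procedure-manager start/stop to elections,
// matching the StartProcedureManager/StopProcedureManager errors above.
struct ProcedureManagerListener;

#[async_trait]
impl LeadershipChangeListener for ProcedureManagerListener {
    fn name(&self) -> &str {
        "ProcedureManager"
    }
    async fn on_leader_start(&self) {
        println!("{}: start procedure manager", self.name());
    }
    async fn on_leader_stop(&self) {
        println!("{}: stop procedure manager", self.name());
    }
}

#[tokio::main]
async fn main() {
    let mut notifier = LeadershipChangeNotifier::default();
    notifier.add_listener(Arc::new(ProcedureManagerListener));
    // Typically driven by the election loop on leadership transitions.
    notifier.notify_on_leader_start().await;
    notifier.notify_on_leader_stop().await;
}
```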
@@ -32,6 +32,7 @@ pub mod heartbeat;
 pub mod instruction;
 pub mod key;
 pub mod kv_backend;
+pub mod leadership_notifier;
 pub mod lock_key;
 pub mod metrics;
 pub mod node_manager;
 
@@ -34,7 +34,7 @@ pub enum CatalogLock<'a> {
     Write(&'a str),
 }
 
-impl<'a> Display for CatalogLock<'a> {
+impl Display for CatalogLock<'_> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let key = match self {
             CatalogLock::Read(s) => s,
@@ -44,7 +44,7 @@ impl<'a> Display for CatalogLock<'a> {
     }
 }
 
-impl<'a> From<CatalogLock<'a>> for StringKey {
+impl From<CatalogLock<'_>> for StringKey {
     fn from(value: CatalogLock) -> Self {
         match value {
             CatalogLock::Write(_) => StringKey::Exclusive(value.to_string()),
 
|
||||
///
|
||||
/// - The [`Region`] may be unavailable (e.g., Crashed, Network disconnected).
|
||||
/// - The [`Region`] was planned to migrate to another [`Peer`].
|
||||
#[serde(alias = "Downgraded")]
|
||||
Downgrading,
|
||||
}
|
||||
|
||||
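The alias keeps previously persisted routes deserializable after the `Downgraded` to `Downgrading` rename. The test added below also exercises the legacy `leader_status` field name, which presumably has a similar field-level alias elsewhere in the struct definition. A runnable distillation of both (enum and struct reduced to the relevant parts):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
enum LeaderState {
    // Accepts the legacy variant name from old payloads.
    #[serde(alias = "Downgraded")]
    Downgrading,
}

#[derive(Debug, Deserialize, PartialEq)]
struct RegionRoute {
    // Accepts the legacy field name from old payloads.
    #[serde(alias = "leader_status")]
    leader_state: Option<LeaderState>,
}

fn main() {
    // Old payload: legacy field name and legacy variant name.
    let old: RegionRoute = serde_json::from_str(r#"{"leader_status":"Downgraded"}"#).unwrap();
    // New payload.
    let new: RegionRoute = serde_json::from_str(r#"{"leader_state":"Downgrading"}"#).unwrap();
    assert_eq!(old, new);
    assert_eq!(old.leader_state, Some(LeaderState::Downgrading));
}
```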
@@ -516,6 +517,73 @@ mod tests {
         assert_eq!(decoded, region_route);
     }
 
+    #[test]
+    fn test_region_route_compatibility() {
+        let region_route = RegionRoute {
+            region: Region {
+                id: 2.into(),
+                name: "r2".to_string(),
+                partition: None,
+                attrs: BTreeMap::new(),
+            },
+            leader_peer: Some(Peer::new(1, "a1")),
+            follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
+            leader_state: Some(LeaderState::Downgrading),
+            leader_down_since: None,
+        };
+        let input = r#"{"region":{"id":2,"name":"r2","partition":null,"attrs":{}},"leader_peer":{"id":1,"addr":"a1"},"follower_peers":[{"id":2,"addr":"a2"},{"id":3,"addr":"a3"}],"leader_state":"Downgraded","leader_down_since":null}"#;
+        let decoded: RegionRoute = serde_json::from_str(input).unwrap();
+        assert_eq!(decoded, region_route);
+
+        let region_route = RegionRoute {
+            region: Region {
+                id: 2.into(),
+                name: "r2".to_string(),
+                partition: None,
+                attrs: BTreeMap::new(),
+            },
+            leader_peer: Some(Peer::new(1, "a1")),
+            follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
+            leader_state: Some(LeaderState::Downgrading),
+            leader_down_since: None,
+        };
+        let input = r#"{"region":{"id":2,"name":"r2","partition":null,"attrs":{}},"leader_peer":{"id":1,"addr":"a1"},"follower_peers":[{"id":2,"addr":"a2"},{"id":3,"addr":"a3"}],"leader_status":"Downgraded","leader_down_since":null}"#;
+        let decoded: RegionRoute = serde_json::from_str(input).unwrap();
+        assert_eq!(decoded, region_route);
+
+        let region_route = RegionRoute {
+            region: Region {
+                id: 2.into(),
+                name: "r2".to_string(),
+                partition: None,
+                attrs: BTreeMap::new(),
+            },
+            leader_peer: Some(Peer::new(1, "a1")),
+            follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
+            leader_state: Some(LeaderState::Downgrading),
+            leader_down_since: None,
+        };
+        let input = r#"{"region":{"id":2,"name":"r2","partition":null,"attrs":{}},"leader_peer":{"id":1,"addr":"a1"},"follower_peers":[{"id":2,"addr":"a2"},{"id":3,"addr":"a3"}],"leader_state":"Downgrading","leader_down_since":null}"#;
+        let decoded: RegionRoute = serde_json::from_str(input).unwrap();
+        assert_eq!(decoded, region_route);
+
+        let region_route = RegionRoute {
+            region: Region {
+                id: 2.into(),
+                name: "r2".to_string(),
+                partition: None,
+                attrs: BTreeMap::new(),
+            },
+            leader_peer: Some(Peer::new(1, "a1")),
+            follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
+            leader_state: Some(LeaderState::Downgrading),
+            leader_down_since: None,
+        };
+        let input = r#"{"region":{"id":2,"name":"r2","partition":null,"attrs":{}},"leader_peer":{"id":1,"addr":"a1"},"follower_peers":[{"id":2,"addr":"a2"},{"id":3,"addr":"a3"}],"leader_status":"Downgrading","leader_down_since":null}"#;
+        let decoded: RegionRoute = serde_json::from_str(input).unwrap();
+        assert_eq!(decoded, region_route);
+    }
+
     #[test]
     fn test_de_serialize_partition() {
         let p = Partition {
 
Some files were not shown because too many files have changed in this diff.