Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-06 13:22:57 +00:00)

Compare commits: `refactor/a` ... `v0.17.2` — 35 commits
Commits in this range (commit messages, authors, and dates were not captured by the mirror): `4bb9ceb63b`, `38456638f8`, `97c0b1f5c1`, `4fc7f12360`, `ed17997449`, `849ae8ebb6`, `a0587e2e87`, `1ed71169ac`, `e62f0e2b64`, `f92e753a34`, `a22b016f90`, `7a9fa99069`, `d808e7be7e`, `8e22fcfd5c`, `26729c31a6`, `b73617eaba`, `3b909f63e3`, `0d4e07eddd`, `b94ce9019d`, `3dcd40c4ba`, `a67803d0e9`, `aa7e7942f8`, `f1b7581dc3`, `cd761df369`, `0cea6ae64d`, `8bf772fb50`, `9c1240921d`, `eb52129a91`, `a0a2b40cbe`, `067c4458d6`, `4e9c31bf5c`, `9320a6ddaa`, `4c9fcb7dee`, `9dc16772fe`, `6ee91f6af4`.
```diff
@@ -2,7 +2,7 @@
 linker = "aarch64-linux-gnu-gcc"
 
 [alias]
-sqlness = "run --bin sqlness-runner --target-dir target/sqlness --"
+sqlness = "run --bin sqlness-runner --"
 
 [unstable.git]
 shallow_index = true
```
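Only the `sqlness` cargo alias changes here: the v0.17.2 side drops `--target-dir target/sqlness`, so `cargo sqlness` still expands to `cargo run --bin sqlness-runner --` but shares the default `target/` directory instead of keeping a separate sqlness build cache. The file header for this hunk did not survive the mirror; given the `[alias]` and `[unstable.git]` tables next to a cross-compilation linker setting, it is almost certainly `.cargo/config.toml`.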
.github/workflows/semantic-pull-request.yml (6 changes)

```diff
@@ -1,7 +1,7 @@
 name: "Semantic Pull Request"
 
 on:
-  pull_request:
+  pull_request_target:
     types:
       - opened
       - reopened
@@ -12,9 +12,9 @@ concurrency:
   cancel-in-progress: true
 
 permissions:
-  issues: write
-  contents: write
+  contents: read
   pull-requests: write
+  issues: write
 
 jobs:
   check:
```
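Two related edits in this workflow: the trigger moves from `pull_request` to `pull_request_target`, which runs the job in the context of the base repository (so it can comment on or label pull requests opened from forks), and the `permissions` block is tightened to match — `contents` drops from `write` to `read`, while `pull-requests: write` and `issues: write` are kept for posting results. The tightening matters with `pull_request_target`, since the workflow token is now potentially exposed to fork-originated events.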
Cargo.lock (171 changes, generated)

```diff
@@ -218,7 +218,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
 
 [[package]]
 name = "api"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "common-base",
  "common-decimal",
@@ -737,7 +737,7 @@ dependencies = [
 
 [[package]]
 name = "auth"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "async-trait",
@@ -1387,7 +1387,7 @@ dependencies = [
 
 [[package]]
 name = "cache"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "catalog",
  "common-error",
@@ -1422,7 +1422,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
 
 [[package]]
 name = "catalog"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "arrow",
@@ -1447,7 +1447,6 @@ dependencies = [
  "common-telemetry",
  "common-time",
  "common-version",
- "common-workload",
  "dashmap",
  "datafusion",
  "datatypes",
@@ -1465,7 +1464,6 @@ dependencies = [
  "promql-parser",
  "rand 0.9.1",
  "rustc-hash 2.1.1",
- "serde",
  "serde_json",
  "session",
  "snafu 0.8.6",
@@ -1765,7 +1763,7 @@ checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
 
 [[package]]
 name = "cli"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -1809,7 +1807,7 @@ dependencies = [
  "session",
  "snafu 0.8.6",
  "store-api",
- "substrait 0.18.0",
+ "substrait 0.17.2",
  "table",
  "tempfile",
  "tokio",
@@ -1818,7 +1816,7 @@ dependencies = [
 
 [[package]]
 name = "client"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "arc-swap",
@@ -1850,7 +1848,7 @@ dependencies = [
  "serde_json",
  "snafu 0.8.6",
  "store-api",
- "substrait 0.18.0",
+ "substrait 0.17.2",
  "substrait 0.37.3",
  "tokio",
  "tokio-stream",
@@ -1891,7 +1889,7 @@ dependencies = [
 
 [[package]]
 name = "cmd"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "async-trait",
  "auth",
@@ -1953,7 +1951,7 @@ dependencies = [
  "snafu 0.8.6",
  "stat",
  "store-api",
- "substrait 0.18.0",
+ "substrait 0.17.2",
  "table",
  "temp-env",
  "tempfile",
@@ -1999,7 +1997,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
 
 [[package]]
 name = "common-base"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "anymap2",
  "async-trait",
@@ -2021,11 +2019,11 @@ dependencies = [
 
 [[package]]
 name = "common-catalog"
-version = "0.18.0"
+version = "0.17.2"
 
 [[package]]
 name = "common-config"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "common-base",
  "common-error",
@@ -2051,7 +2049,7 @@ dependencies = [
 
 [[package]]
 name = "common-datasource"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "arrow",
  "arrow-schema",
@@ -2086,7 +2084,7 @@ dependencies = [
 
 [[package]]
 name = "common-decimal"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "bigdecimal 0.4.8",
  "common-error",
@@ -2099,7 +2097,7 @@ dependencies = [
 
 [[package]]
 name = "common-error"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "common-macro",
  "http 1.3.1",
@@ -2110,7 +2108,7 @@ dependencies = [
 
 [[package]]
 name = "common-event-recorder"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "async-trait",
@@ -2132,7 +2130,7 @@ dependencies = [
 
 [[package]]
 name = "common-frontend"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "async-trait",
@@ -2154,7 +2152,7 @@ dependencies = [
 
 [[package]]
 name = "common-function"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "ahash 0.8.12",
  "api",
@@ -2194,6 +2192,7 @@ dependencies = [
  "nalgebra",
  "num",
  "num-traits",
+ "once_cell",
  "paste",
  "pretty_assertions",
  "s2",
@@ -2211,7 +2210,7 @@ dependencies = [
 
 [[package]]
 name = "common-greptimedb-telemetry"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "async-trait",
  "common-runtime",
@@ -2228,7 +2227,7 @@ dependencies = [
 
 [[package]]
 name = "common-grpc"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "arrow-flight",
@@ -2261,7 +2260,7 @@ dependencies = [
 
 [[package]]
 name = "common-grpc-expr"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "common-base",
@@ -2281,7 +2280,7 @@ dependencies = [
 
 [[package]]
 name = "common-macro"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "greptime-proto",
  "once_cell",
@@ -2292,7 +2291,7 @@ dependencies = [
 
 [[package]]
 name = "common-mem-prof"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "anyhow",
  "common-error",
@@ -2308,7 +2307,7 @@ dependencies = [
 
 [[package]]
 name = "common-meta"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "anymap2",
  "api",
@@ -2380,7 +2379,7 @@ dependencies = [
 
 [[package]]
 name = "common-options"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "common-grpc",
  "humantime-serde",
@@ -2389,11 +2388,11 @@ dependencies = [
 
 [[package]]
 name = "common-plugins"
-version = "0.18.0"
+version = "0.17.2"
 
 [[package]]
 name = "common-pprof"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "common-error",
  "common-macro",
@@ -2405,7 +2404,7 @@ dependencies = [
 
 [[package]]
 name = "common-procedure"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "async-stream",
@@ -2434,7 +2433,7 @@ dependencies = [
 
 [[package]]
 name = "common-procedure-test"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "async-trait",
  "common-procedure",
@@ -2444,7 +2443,7 @@ dependencies = [
 
 [[package]]
 name = "common-query"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "async-trait",
@@ -2469,14 +2468,12 @@ dependencies = [
 
 [[package]]
 name = "common-recordbatch"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "arc-swap",
- "common-base",
  "common-error",
  "common-macro",
  "common-telemetry",
- "common-time",
  "datafusion",
  "datafusion-common",
  "datatypes",
@@ -2492,7 +2489,7 @@ dependencies = [
 
 [[package]]
 name = "common-runtime"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "async-trait",
  "clap 4.5.40",
@@ -2521,7 +2518,7 @@ dependencies = [
 
 [[package]]
 name = "common-session"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "serde",
  "strum 0.27.1",
@@ -2529,7 +2526,7 @@ dependencies = [
 
 [[package]]
 name = "common-sql"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "common-base",
  "common-decimal",
@@ -2547,7 +2544,7 @@ dependencies = [
 
 [[package]]
 name = "common-telemetry"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "backtrace",
  "common-base",
@@ -2576,7 +2573,7 @@ dependencies = [
 
 [[package]]
 name = "common-test-util"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "client",
  "common-grpc",
@@ -2589,7 +2586,7 @@ dependencies = [
 
 [[package]]
 name = "common-time"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "arrow",
  "chrono",
@@ -2607,7 +2604,7 @@ dependencies = [
 
 [[package]]
 name = "common-version"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "build-data",
  "cargo-manifest",
@@ -2618,7 +2615,7 @@ dependencies = [
 
 [[package]]
 name = "common-wal"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "common-base",
  "common-error",
@@ -2641,7 +2638,7 @@ dependencies = [
 
 [[package]]
 name = "common-workload"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "common-telemetry",
  "serde",
@@ -3868,7 +3865,7 @@ dependencies = [
 
 [[package]]
 name = "datanode"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "arrow-flight",
@@ -3921,7 +3918,7 @@ dependencies = [
  "session",
  "snafu 0.8.6",
  "store-api",
- "substrait 0.18.0",
+ "substrait 0.17.2",
  "table",
  "tokio",
  "toml 0.8.23",
@@ -3931,7 +3928,7 @@ dependencies = [
 
 [[package]]
 name = "datatypes"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "arrow",
  "arrow-array",
@@ -4605,7 +4602,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
 
 [[package]]
 name = "file-engine"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "async-trait",
@@ -4737,7 +4734,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
 
 [[package]]
 name = "flow"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "arrow",
@@ -4804,7 +4801,7 @@ dependencies = [
  "sql",
  "store-api",
  "strum 0.27.1",
- "substrait 0.18.0",
+ "substrait 0.17.2",
  "table",
  "tokio",
  "tonic 0.13.1",
@@ -4859,7 +4856,7 @@ checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619"
 
 [[package]]
 name = "frontend"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "arc-swap",
@@ -4922,7 +4919,7 @@ dependencies = [
  "sqlparser 0.55.0-greptime",
  "store-api",
  "strfmt",
- "substrait 0.18.0",
+ "substrait 0.17.2",
  "table",
  "tokio",
  "tokio-util",
@@ -5302,7 +5299,7 @@ dependencies = [
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=f9836cf8aab30e672f640c6ef4c1cfd2cf9fbc36#f9836cf8aab30e672f640c6ef4c1cfd2cf9fbc36"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=66eb089afa6baaa3ddfafabd0a4abbe317d012c3#66eb089afa6baaa3ddfafabd0a4abbe317d012c3"
 dependencies = [
  "prost 0.13.5",
  "prost-types 0.13.5",
@@ -6064,7 +6061,7 @@ dependencies = [
 
 [[package]]
 name = "index"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "async-trait",
  "asynchronous-codec",
@@ -7004,7 +7001,7 @@ checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
 
 [[package]]
 name = "log-query"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "chrono",
  "common-error",
@@ -7016,7 +7013,7 @@ dependencies = [
 
 [[package]]
 name = "log-store"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -7287,8 +7284,7 @@ checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0"
 [[package]]
 name = "memcomparable"
 version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "376101dbd964fc502d5902216e180f92b3d003b5cc3d2e40e044eb5470fca677"
+source = "git+https://github.com/v0y4g3r/memcomparable.git?rev=a07122dc03556bbd88ad66234cbea7efd3b23efb#a07122dc03556bbd88ad66234cbea7efd3b23efb"
 dependencies = [
  "bytes",
  "serde",
@@ -7324,7 +7320,7 @@ dependencies = [
 
 [[package]]
 name = "meta-client"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "async-trait",
@@ -7352,7 +7348,7 @@ dependencies = [
 
 [[package]]
 name = "meta-srv"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "async-trait",
@@ -7448,7 +7444,7 @@ dependencies = [
 
 [[package]]
 name = "metric-engine"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "aquamarine",
@@ -7541,7 +7537,7 @@ dependencies = [
 
 [[package]]
 name = "mito-codec"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "bytes",
@@ -7565,7 +7561,7 @@ dependencies = [
 
 [[package]]
 name = "mito2"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "aquamarine",
@@ -7594,7 +7590,6 @@ dependencies = [
  "crc32fast",
  "criterion 0.4.0",
  "crossbeam-utils",
- "dashmap",
  "datafusion",
  "datafusion-common",
  "datafusion-expr",
@@ -7607,12 +7602,10 @@ dependencies = [
  "itertools 0.14.0",
  "lazy_static",
  "log-store",
- "memcomparable",
  "mito-codec",
  "moka",
  "object-store",
  "parquet",
- "partition",
  "paste",
  "pin-project",
  "prometheus",
@@ -8302,7 +8295,7 @@ dependencies = [
 
 [[package]]
 name = "object-store"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "anyhow",
  "bytes",
@@ -8587,7 +8580,7 @@ dependencies = [
 
 [[package]]
 name = "operator"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "ahash 0.8.12",
  "api",
@@ -8645,7 +8638,7 @@ dependencies = [
  "sql",
  "sqlparser 0.55.0-greptime",
  "store-api",
- "substrait 0.18.0",
+ "substrait 0.17.2",
  "table",
  "tokio",
  "tokio-util",
@@ -8957,7 +8950,7 @@ dependencies = [
 
 [[package]]
 name = "partition"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "async-trait",
@@ -9296,7 +9289,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
 
 [[package]]
 name = "pipeline"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "ahash 0.8.12",
  "api",
@@ -9452,7 +9445,7 @@ dependencies = [
 
 [[package]]
 name = "plugins"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "auth",
  "clap 4.5.40",
@@ -9750,7 +9743,7 @@ dependencies = [
 
 [[package]]
 name = "promql"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "ahash 0.8.12",
  "async-trait",
@@ -10033,7 +10026,7 @@ dependencies = [
 
 [[package]]
 name = "puffin"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "async-compression 0.4.19",
  "async-trait",
@@ -10075,7 +10068,7 @@ dependencies = [
 
 [[package]]
 name = "query"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "ahash 0.8.12",
  "api",
@@ -10140,7 +10133,7 @@ dependencies = [
  "sql",
  "sqlparser 0.55.0-greptime",
  "store-api",
- "substrait 0.18.0",
+ "substrait 0.17.2",
  "table",
  "tokio",
  "tokio-stream",
@@ -11504,7 +11497,7 @@ dependencies = [
 
 [[package]]
 name = "servers"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "ahash 0.8.12",
  "api",
@@ -11575,7 +11568,6 @@ dependencies = [
  "openmetrics-parser",
  "opensrv-mysql",
  "opentelemetry-proto",
- "operator",
  "otel-arrow-rust",
  "parking_lot 0.12.4",
  "permutation",
@@ -11628,7 +11620,7 @@ dependencies = [
 
 [[package]]
 name = "session"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "ahash 0.8.12",
  "api",
@@ -11956,7 +11948,7 @@ dependencies = [
 
 [[package]]
 name = "sql"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "arrow-buffer",
@@ -12014,7 +12006,7 @@ dependencies = [
 
 [[package]]
 name = "sqlness-runner"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "async-trait",
  "clap 4.5.40",
@@ -12314,7 +12306,7 @@ dependencies = [
 
 [[package]]
 name = "stat"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "nix 0.30.1",
 ]
@@ -12327,7 +12319,7 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
 
 [[package]]
 name = "store-api"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "aquamarine",
@@ -12474,11 +12466,12 @@ dependencies = [
 
 [[package]]
 name = "substrait"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "async-trait",
  "bytes",
  "common-error",
+ "common-function",
  "common-macro",
  "common-telemetry",
  "datafusion",
@@ -12642,7 +12635,7 @@ dependencies = [
 
 [[package]]
 name = "table"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "async-trait",
@@ -12911,7 +12904,7 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683"
 
 [[package]]
 name = "tests-fuzz"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "arbitrary",
  "async-trait",
@@ -12955,7 +12948,7 @@ dependencies = [
 
 [[package]]
 name = "tests-integration"
-version = "0.18.0"
+version = "0.17.2"
 dependencies = [
  "api",
  "arrow-flight",
@@ -13027,7 +13020,7 @@ dependencies = [
  "sql",
  "sqlx",
  "store-api",
- "substrait 0.18.0",
+ "substrait 0.17.2",
  "table",
  "tempfile",
  "time",
```
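All of the `Cargo.lock` churn above is generated and falls into four groups: every workspace crate's `version` moves from 0.18.0 to 0.17.2 (including the internal `substrait 0.18.0` → `substrait 0.17.2` dependency edges); `greptime-proto` is pinned to a different upstream revision; `memcomparable` switches from the crates.io release (with checksum) to a pinned git fork; and a handful of dependency edges change — `catalog` loses `common-workload` and `serde`, `common-function` gains `once_cell`, `common-recordbatch` loses `common-base` and `common-time`, `mito2` loses `dashmap`, `memcomparable`, and `partition`, `servers` loses `operator`, and the workspace `substrait` crate gains `common-function`. The manifest edits driving these appear in the hunks that follow.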
```diff
@@ -73,8 +73,8 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "0.18.0"
-edition = "2024"
+version = "0.17.2"
+edition = "2021"
 license = "Apache-2.0"
 
 [workspace.lints]
@@ -145,7 +145,7 @@ etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62d
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "f9836cf8aab30e672f640c6ef4c1cfd2cf9fbc36" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "66eb089afa6baaa3ddfafabd0a4abbe317d012c3" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
```
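Besides the version bump, `edition` in the workspace manifest moves from "2024" back to "2021", which also reverts rustfmt's style edition. That appears to account for nearly all of the Rust churn below: the hunks only reorder imports and re-wrap a few expressions, with no behavior change. The 2024 style sorts imports ASCII-betically (so `COMMENT_KEY` precedes `ColumnDefaultConstraint`, and `snafu::Location` precedes `snafu::prelude`), while the pre-2024 style groups lowercase names first, then mixed-case types, then all-caps constants (giving `{ensure, OptionExt}` and `{StaticUserProvider, STATIC_USER_PROVIDER}`).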
```diff
@@ -103,7 +103,6 @@
 | `storage` | -- | -- | The data storage options. |
 | `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
-| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default. |
 | `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
 | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
 | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
@@ -495,7 +494,6 @@
 | `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
 | `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
-| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default. |
 | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
 | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
 | `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
```
```diff
@@ -274,9 +274,6 @@ type = "File"
 ## @toml2docs:none-default
 #+ cache_path = ""
 
-## Whether to enable read cache. If not set, the read cache will be enabled by default.
-enable_read_cache = true
-
 ## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
 ## @toml2docs:none-default
 cache_capacity = "5GiB"
@@ -361,9 +361,6 @@ data_home = "./greptimedb_data"
 ## - `Oss`: the data is stored in the Aliyun OSS.
 type = "File"
 
-## Whether to enable read cache. If not set, the read cache will be enabled by default.
-enable_read_cache = true
-
 ## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
 ## A local file directory, defaults to `{data_home}`. An empty string means disabling.
 ## @toml2docs:none-default
```
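These two example-config hunks drop the `enable_read_cache = true` setting together with its doc comment, mirroring the removal of the `storage.enable_read_cache` rows from the option tables above; on the v0.17.2 side the read cache is configured through `cache_path` and `cache_capacity` alone.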
```diff
@@ -19,8 +19,8 @@ use common_error::status_code::StatusCode;
 use common_macro::stack_trace_debug;
 use common_time::timestamp::TimeUnit;
 use datatypes::prelude::ConcreteDataType;
-use snafu::Location;
 use snafu::prelude::*;
+use snafu::Location;
 
 pub type Result<T> = std::result::Result<T, Error>;
 
```
```diff
@@ -16,15 +16,15 @@ use std::collections::HashSet;
 use std::sync::Arc;
 
 use common_base::BitVec;
-use common_decimal::Decimal128;
 use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
+use common_decimal::Decimal128;
 use common_time::time::Time;
 use common_time::timestamp::TimeUnit;
 use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp};
 use datatypes::prelude::{ConcreteDataType, ValueRef};
 use datatypes::scalars::ScalarVector;
 use datatypes::types::{
-    Int8Type, Int16Type, IntervalType, TimeType, TimestampType, UInt8Type, UInt16Type,
+    Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
 };
 use datatypes::value::{OrderedF32, OrderedF64, Value};
 use datatypes::vectors::{
@@ -295,7 +295,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
             | ConcreteDataType::Struct(_)
             | ConcreteDataType::Dictionary(_)
             | ConcreteDataType::Duration(_) => {
-                return error::IntoColumnDataTypeSnafu { from: datatype }.fail();
+                return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
             }
         };
         let datatype_extension = match column_datatype {
```
```diff
@@ -15,9 +15,9 @@
 use std::collections::HashMap;
 
 use datatypes::schema::{
-    COMMENT_KEY, ColumnDefaultConstraint, ColumnSchema, FULLTEXT_KEY, FulltextAnalyzer,
-    FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, SkippingIndexOptions,
-    SkippingIndexType,
+    ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextBackend, FulltextOptions,
+    SkippingIndexOptions, SkippingIndexType, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY,
+    SKIPPING_INDEX_KEY,
 };
 use greptime_proto::v1::{
     Analyzer, FulltextBackend as PbFulltextBackend, SkippingIndexType as PbSkippingIndexType,
```
```diff
@@ -17,13 +17,13 @@ use std::sync::Arc;
 use common_base::secrets::SecretString;
 use digest::Digest;
 use sha1::Sha1;
-use snafu::{OptionExt, ensure};
+use snafu::{ensure, OptionExt};
 
 use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
 use crate::user_info::DefaultUserInfo;
-use crate::user_provider::static_user_provider::{STATIC_USER_PROVIDER, StaticUserProvider};
+use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER};
 use crate::user_provider::watch_file_user_provider::{
-    WATCH_FILE_USER_PROVIDER, WatchFileUserProvider,
+    WatchFileUserProvider, WATCH_FILE_USER_PROVIDER,
 };
 use crate::{UserInfoRef, UserProviderRef};
 
```
```diff
@@ -22,13 +22,13 @@ mod user_provider;
 pub mod tests;
 
 pub use common::{
-    HashedPassword, Identity, Password, auth_mysql, static_user_provider_from_option,
-    user_provider_from_option, userinfo_by_name,
+    auth_mysql, static_user_provider_from_option, user_provider_from_option, userinfo_by_name,
+    HashedPassword, Identity, Password,
 };
 pub use permission::{PermissionChecker, PermissionReq, PermissionResp};
 pub use user_info::UserInfo;
-pub use user_provider::UserProvider;
 pub use user_provider::static_user_provider::StaticUserProvider;
+pub use user_provider::UserProvider;
 
 /// pub type alias
 pub type UserInfoRef = std::sync::Arc<dyn UserInfo>;
```
```diff
@@ -21,7 +21,7 @@ use crate::error::{
     UserPasswordMismatchSnafu,
 };
 use crate::user_info::DefaultUserInfo;
-use crate::{Identity, Password, UserInfoRef, UserProvider, auth_mysql};
+use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
 
 pub struct DatabaseAuthInfo<'a> {
     pub catalog: &'a str,
```
```diff
@@ -22,7 +22,7 @@ use std::io::BufRead;
 use std::path::Path;
 
 use common_base::secrets::ExposeSecret;
-use snafu::{OptionExt, ResultExt, ensure};
+use snafu::{ensure, OptionExt, ResultExt};
 
 use crate::common::{Identity, Password};
 use crate::error::{
@@ -30,7 +30,7 @@ use crate::error::{
     UserNotFoundSnafu, UserPasswordMismatchSnafu,
 };
 use crate::user_info::DefaultUserInfo;
-use crate::{UserInfoRef, auth_mysql};
+use crate::{auth_mysql, UserInfoRef};
 
 #[async_trait::async_trait]
 pub trait UserProvider: Send + Sync {
```
```diff
@@ -102,10 +102,10 @@ pub mod test {
 
     use common_test_util::temp_dir::create_temp_dir;
 
-    use crate::UserProvider;
     use crate::user_info::DefaultUserInfo;
     use crate::user_provider::static_user_provider::StaticUserProvider;
     use crate::user_provider::{Identity, Password};
+    use crate::UserProvider;
 
     async fn test_authenticate(provider: &dyn UserProvider, username: &str, password: &str) {
         let re = provider
@@ -143,13 +143,12 @@ pub mod test {
         let file = File::create(&file_path);
         let file = file.unwrap();
         let mut lw = LineWriter::new(file);
-        assert!(
-            lw.write_all(
+        assert!(lw
+            .write_all(
                 b"root=123456
 admin=654321",
             )
-            .is_ok()
-        );
+            .is_ok());
         lw.flush().unwrap();
     }
 
```
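The `assert!` rewrite in the second hunk above is formatting-only as well: the 2024 style breaks after `assert!(`, while the older style keeps `assert!(lw` and hangs the `.write_all(...)` chain; the test's behavior is unchanged.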
```diff
@@ -20,7 +20,7 @@ use std::sync::{Arc, Mutex};
 use async_trait::async_trait;
 use common_telemetry::{info, warn};
 use notify::{EventKind, RecursiveMode, Watcher};
-use snafu::{ResultExt, ensure};
+use snafu::{ensure, ResultExt};
 
 use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
 use crate::user_info::DefaultUserInfo;
@@ -133,9 +133,9 @@ pub mod test {
     use common_test_util::temp_dir::create_temp_dir;
     use tokio::time::sleep;
 
-    use crate::UserProvider;
     use crate::user_provider::watch_file_user_provider::WatchFileUserProvider;
     use crate::user_provider::{Identity, Password};
+    use crate::UserProvider;
 
     async fn test_authenticate(
         provider: &dyn UserProvider,
```
src/cache/src/lib.rs (6 changes)

```diff
@@ -19,9 +19,9 @@ use std::time::Duration;
 
 use catalog::kvbackend::new_table_cache;
 use common_meta::cache::{
-    CacheRegistry, CacheRegistryBuilder, LayeredCacheRegistryBuilder, new_schema_cache,
-    new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
-    new_table_route_cache, new_table_schema_cache, new_view_info_cache,
+    new_schema_cache, new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
+    new_table_route_cache, new_table_schema_cache, new_view_info_cache, CacheRegistry,
+    CacheRegistryBuilder, LayeredCacheRegistryBuilder,
 };
 use common_meta::kv_backend::KvBackendRef;
 use moka::future::CacheBuilder;
```
```diff
@@ -32,7 +32,6 @@ common-runtime.workspace = true
 common-telemetry.workspace = true
 common-time.workspace = true
 common-version.workspace = true
-common-workload.workspace = true
 dashmap.workspace = true
 datafusion.workspace = true
 datatypes.workspace = true
@@ -49,7 +48,6 @@ prometheus.workspace = true
 promql-parser.workspace = true
 rand.workspace = true
 rustc-hash.workspace = true
-serde.workspace = true
 serde_json.workspace = true
 session.workspace = true
 snafu.workspace = true
```
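The two entries removed here (`common-workload.workspace = true` and `serde.workspace = true`) line up with the `catalog` crate losing `common-workload` and `serde` in the `Cargo.lock` hunks above, so this manifest is presumably the catalog crate's `Cargo.toml` (its file header did not survive the mirror).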
```diff
@@ -297,20 +297,6 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
-
-    #[snafu(display("Failed to handle query"))]
-    HandleQuery {
-        source: common_meta::error::Error,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Failed to project schema"))]
-    ProjectSchema {
-        source: datatypes::error::Error,
-        #[snafu(implicit)]
-        location: Location,
-    },
 }
 
 impl Error {
@@ -383,8 +369,6 @@ impl ErrorExt for Error {
             Error::FrontendNotFound { .. } | Error::MetaClientMissing { .. } => {
                 StatusCode::Unexpected
             }
-            Error::HandleQuery { source, .. } => source.status_code(),
-            Error::ProjectSchema { source, .. } => source.status_code(),
         }
     }
 
```
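For readers unfamiliar with the pattern being deleted: each variant pairs a `source` error with an implicitly captured `Location`, and the status code is forwarded from the source, exactly as the removed `HandleQuery`/`ProjectSchema` match arms did. A minimal self-contained sketch of the snafu mechanics — the `fallible` function and the `std::io::Error` source are hypothetical stand-ins for illustration, not GreptimeDB APIs:

```rust
use snafu::{Location, ResultExt, Snafu};

#[derive(Debug, Snafu)]
pub enum Error {
    #[snafu(display("Failed to handle query"))]
    HandleQuery {
        // Stand-in for `common_meta::error::Error` in the removed variant.
        source: std::io::Error,
        // Captured automatically at the `.context(...)` call site.
        #[snafu(implicit)]
        location: Location,
    },
}

// Hypothetical fallible operation, used only for illustration.
fn fallible() -> std::io::Result<()> {
    Err(std::io::Error::other("boom"))
}

fn handle() -> Result<(), Error> {
    // `HandleQuerySnafu` is the context selector snafu derives for the variant;
    // it wraps the source error and records the location implicitly.
    fallible().context(HandleQuerySnafu)
}

fn main() {
    if let Err(e) = handle() {
        println!("{e}"); // prints "Failed to handle query"
    }
}
```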
```diff
@@ -14,34 +14,25 @@
 
 use api::v1::meta::ProcedureStatus;
 use common_error::ext::BoxedError;
-use common_meta::cluster::{ClusterInfo, NodeInfo, Role};
+use common_meta::cluster::{ClusterInfo, NodeInfo};
 use common_meta::datanode::RegionStat;
 use common_meta::key::flow::flow_state::FlowStat;
-use common_meta::node_manager::DatanodeManagerRef;
 use common_meta::procedure_executor::{ExecutorContext, ProcedureExecutor};
 use common_meta::rpc::procedure;
 use common_procedure::{ProcedureInfo, ProcedureState};
-use common_query::request::QueryRequest;
-use common_recordbatch::SendableRecordBatchStream;
-use common_recordbatch::util::ChainedRecordBatchStream;
 use meta_client::MetaClientRef;
 use snafu::ResultExt;
-use store_api::storage::RegionId;
 
 use crate::error;
-use crate::information_schema::{DatanodeInspectRequest, InformationExtension};
+use crate::information_schema::InformationExtension;
 
 pub struct DistributedInformationExtension {
     meta_client: MetaClientRef,
-    datanode_manager: DatanodeManagerRef,
 }
 
 impl DistributedInformationExtension {
-    pub fn new(meta_client: MetaClientRef, datanode_manager: DatanodeManagerRef) -> Self {
-        Self {
-            meta_client,
-            datanode_manager,
-        }
+    pub fn new(meta_client: MetaClientRef) -> Self {
+        Self { meta_client }
     }
 }
 
@@ -107,39 +98,4 @@ impl InformationExtension for DistributedInformationExtension {
             .map_err(BoxedError::new)
             .context(crate::error::ListFlowStatsSnafu)
     }
-
-    async fn inspect_datanode(
-        &self,
-        request: DatanodeInspectRequest,
-    ) -> std::result::Result<SendableRecordBatchStream, Self::Error> {
-        // Aggregate results from all datanodes
-        let nodes = self
-            .meta_client
-            .list_nodes(Some(Role::Datanode))
-            .await
-            .map_err(BoxedError::new)
-            .context(crate::error::ListNodesSnafu)?;
-
-        let plan = request
-            .build_plan()
-            .context(crate::error::DatafusionSnafu)?;
-
-        let mut streams = Vec::with_capacity(nodes.len());
-        for node in nodes {
-            let client = self.datanode_manager.datanode(&node.peer).await;
-            let stream = client
-                .handle_query(QueryRequest {
-                    plan: plan.clone(),
-                    region_id: RegionId::default(),
-                    header: None,
-                })
-                .await
-                .context(crate::error::HandleQuerySnafu)?;
-            streams.push(stream);
-        }
-
-        let chained =
-            ChainedRecordBatchStream::new(streams).context(crate::error::CreateRecordBatchSnafu)?;
-        Ok(Box::pin(chained))
-    }
 }
```
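The second hunk deletes the distributed `inspect_datanode` implementation outright: it listed all datanodes through the meta client, built a DataFusion plan once, broadcast it to each node via `handle_query`, and concatenated the resulting record-batch streams with `ChainedRecordBatchStream`. The import cleanup in the first hunk follows from that (`Role`, `DatanodeManagerRef`, `QueryRequest`, the recordbatch types, `RegionId`, and `DatanodeInspectRequest` all become unused), as does the dropped `datanode_manager` field. The core trick — chaining per-node streams end to end — can be sketched generically with `futures`; the `Batch` alias below is a hypothetical stand-in for the real record-batch type, and this is an illustration of the pattern, not the repository's code:

```rust
use futures::stream::{self, Stream, StreamExt};

// Stand-in for a record batch; the real streams yield RecordBatch results.
type Batch = Vec<u64>;

/// Chain per-node streams end to end, preserving node order, the way the
/// removed `inspect_datanode` combined datanode results into one stream.
fn chain_streams(
    parts: Vec<impl Stream<Item = Batch> + Unpin>,
) -> impl Stream<Item = Batch> {
    // A stream of streams, flattened: each inner stream is drained in turn.
    stream::iter(parts).flatten()
}

#[tokio::main]
async fn main() {
    let node_a = stream::iter(vec![vec![1, 2], vec![3]]);
    let node_b = stream::iter(vec![vec![4]]);
    let all: Vec<Batch> = chain_streams(vec![node_a, node_b]).collect().await;
    assert_eq!(all, vec![vec![1, 2], vec![3], vec![4]]);
}
```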
```diff
@@ -21,4 +21,4 @@ mod table_cache;
 
 pub use builder::KvBackendCatalogManagerBuilder;
 pub use manager::KvBackendCatalogManager;
-pub use table_cache::{TableCache, TableCacheRef, new_table_cache};
+pub use table_cache::{new_table_cache, TableCache, TableCacheRef};
```
```diff
@@ -16,8 +16,8 @@ use std::sync::Arc;
 
 use common_catalog::consts::DEFAULT_CATALOG_NAME;
 use common_meta::cache::LayeredCacheRegistryRef;
-use common_meta::key::TableMetadataManager;
 use common_meta::key::flow::FlowMetadataManager;
+use common_meta::key::TableMetadataManager;
 use common_meta::kv_backend::KvBackendRef;
 use common_procedure::ProcedureManagerRef;
 use moka::sync::Cache;
@@ -26,8 +26,8 @@ use partition::manager::PartitionRuleManager;
 #[cfg(feature = "enterprise")]
 use crate::information_schema::InformationSchemaTableFactoryRef;
 use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
+use crate::kvbackend::manager::{SystemCatalog, CATALOG_CACHE_MAX_CAPACITY};
 use crate::kvbackend::KvBackendCatalogManager;
-use crate::kvbackend::manager::{CATALOG_CACHE_MAX_CAPACITY, SystemCatalog};
 use crate::process_manager::ProcessManagerRef;
 use crate::system_schema::pg_catalog::PGCatalogProvider;
 
```
@@ -24,12 +24,12 @@ use common_meta::error::Error::CacheNotGet;
|
|||||||
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
|
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
|
||||||
use common_meta::kv_backend::txn::{Txn, TxnResponse};
|
use common_meta::kv_backend::txn::{Txn, TxnResponse};
|
||||||
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
|
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
|
||||||
use common_meta::rpc::KeyValue;
|
|
||||||
use common_meta::rpc::store::{
|
use common_meta::rpc::store::{
|
||||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||||
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
|
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
|
||||||
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
|
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
|
||||||
};
|
};
|
||||||
|
use common_meta::rpc::KeyValue;
|
||||||
use common_telemetry::debug;
|
use common_telemetry::debug;
|
||||||
use meta_client::client::MetaClient;
|
use meta_client::client::MetaClient;
|
||||||
use moka::future::{Cache, CacheBuilder};
|
use moka::future::{Cache, CacheBuilder};
|
||||||
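Note: much of the remaining churn in this compare is mechanical import reordering, consistent with the two sides being formatted under different rustfmt style editions (the base branch sorts ASCIIbetically, so uppercase names lead; v0.17.2 uses the older mixed-case ordering). The `assert!` re-wrapping in the test hunks further down looks like the same formatter-version effect. An illustration, reusing two imports that actually appear in a later hunk of this compare (`HashMap` vs `hash_map::Entry`):

#[allow(unused_imports)]
mod base_branch_style {
    // Base branch: `HashMap` sorts first (uppercase before lowercase).
    use std::collections::HashMap;
    use std::collections::hash_map::Entry;
}

#[allow(unused_imports)]
mod v0_17_2_style {
    // v0.17.2: `hash_map::Entry` sorts first.
    use std::collections::hash_map::Entry;
    use std::collections::HashMap;
}
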
@@ -461,17 +461,17 @@ impl KvBackend for MetaKvBackend {
 #[cfg(test)]
 mod tests {
     use std::any::Any;
-    use std::sync::Arc;
     use std::sync::atomic::{AtomicU32, Ordering};
+    use std::sync::Arc;
 
     use async_trait::async_trait;
     use common_meta::kv_backend::{KvBackend, TxnService};
-    use common_meta::rpc::KeyValue;
     use common_meta::rpc::store::{
         BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse,
         BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest,
         PutResponse, RangeRequest, RangeResponse,
     };
+    use common_meta::rpc::KeyValue;
     use dashmap::DashMap;
 
     use super::CachedKvBackend;

@@ -26,12 +26,12 @@ use common_meta::cache::{
     LayeredCacheRegistryRef, TableInfoCacheRef, TableNameCacheRef, TableRoute, TableRouteCacheRef,
     ViewInfoCacheRef,
 };
-use common_meta::key::TableMetadataManagerRef;
 use common_meta::key::catalog_name::CatalogNameKey;
 use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::schema_name::SchemaNameKey;
 use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
 use common_meta::key::table_name::TableNameKey;
+use common_meta::key::TableMetadataManagerRef;
 use common_meta::kv_backend::KvBackendRef;
 use common_procedure::ProcedureManagerRef;
 use futures_util::stream::BoxStream;
@@ -41,16 +41,15 @@ use partition::manager::PartitionRuleManagerRef;
 use session::context::{Channel, QueryContext};
 use snafu::prelude::*;
 use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
-use table::TableRef;
 use table::dist_table::DistTable;
 use table::metadata::{TableId, TableInfoRef};
+use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
 use table::table::PartitionRules;
-use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable};
 use table::table_name::TableName;
+use table::TableRef;
 use tokio::sync::Semaphore;
 use tokio_stream::wrappers::ReceiverStream;
 
-use crate::CatalogManager;
 use crate::error::{
     CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
     ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,

@@ -60,8 +59,9 @@ use crate::information_schema::InformationSchemaTableFactoryRef;
 use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
 use crate::kvbackend::TableCacheRef;
 use crate::process_manager::ProcessManagerRef;
-use crate::system_schema::SystemSchemaProvider;
 use crate::system_schema::pg_catalog::PGCatalogProvider;
+use crate::system_schema::SystemSchemaProvider;
+use crate::CatalogManager;
 
 /// Access all existing catalog, schema and tables.
 ///

@@ -20,9 +20,9 @@ use common_meta::instruction::CacheIdent;
 use futures::future::BoxFuture;
 use moka::future::Cache;
 use snafu::OptionExt;
-use table::TableRef;
 use table::dist_table::DistTable;
 use table::table_name::TableName;
+use table::TableRef;
 
 pub type TableCacheRef = Arc<TableCache>;
 
@@ -25,8 +25,8 @@ use common_catalog::consts::{INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME};
 use futures::future::BoxFuture;
 use futures_util::stream::BoxStream;
 use session::context::QueryContext;
-use table::TableRef;
 use table::metadata::{TableId, TableInfoRef};
+use table::TableRef;
 
 use crate::error::Result;
 

@@ -14,4 +14,4 @@
 
 pub mod manager;
 
-pub use manager::{MemoryCatalogManager, new_memory_catalog_manager};
+pub use manager::{new_memory_catalog_manager, MemoryCatalogManager};

@@ -28,8 +28,8 @@ use common_meta::kv_backend::memory::MemoryKvBackend;
 use futures_util::stream::BoxStream;
 use session::context::QueryContext;
 use snafu::OptionExt;
-use table::TableRef;
 use table::metadata::{TableId, TableInfoRef};
+use table::TableRef;
 
 use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
 use crate::information_schema::InformationSchemaProvider;

@@ -419,7 +419,7 @@ pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
 mod tests {
     use common_catalog::consts::*;
     use futures_util::TryStreamExt;
-    use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable};
+    use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
 
     use super::*;
 
@@ -454,18 +454,16 @@ mod tests {
             tables[0].table_info().table_id()
         );
 
-        assert!(
-            catalog_list
-                .table(
-                    DEFAULT_CATALOG_NAME,
-                    DEFAULT_SCHEMA_NAME,
-                    "not_exists",
-                    None
-                )
-                .await
-                .unwrap()
-                .is_none()
-        );
+        assert!(catalog_list
+            .table(
+                DEFAULT_CATALOG_NAME,
+                DEFAULT_SCHEMA_NAME,
+                "not_exists",
+                None
+            )
+            .await
+            .unwrap()
+            .is_none());
     }
 
     #[test]

@@ -488,13 +486,11 @@ mod tests {
             table: NumbersTable::table(2333),
         };
         catalog.register_table_sync(register_table_req).unwrap();
-        assert!(
-            catalog
-                .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
-                .await
-                .unwrap()
-                .is_some()
-        );
+        assert!(catalog
+            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
+            .await
+            .unwrap()
+            .is_some());
 
         let deregister_table_req = DeregisterTableRequest {
             catalog: DEFAULT_CATALOG_NAME.to_string(),

@@ -502,12 +498,10 @@ mod tests {
             table_name: table_name.to_string(),
         };
         catalog.deregister_table_sync(deregister_table_req).unwrap();
-        assert!(
-            catalog
-                .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
-                .await
-                .unwrap()
-                .is_none()
-        );
+        assert!(catalog
+            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
+            .await
+            .unwrap()
+            .is_none());
     }
 }
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::collections::HashMap;
 use std::collections::hash_map::Entry;
+use std::collections::HashMap;
 use std::fmt::{Debug, Display, Formatter};
 use std::sync::atomic::{AtomicU32, Ordering};
 use std::sync::{Arc, RwLock};

@@ -30,7 +30,7 @@ use common_time::util::current_time_millis;
 use meta_client::MetaClientRef;
 use promql_parser::parser::EvalStmt;
 use rand::random;
-use snafu::{OptionExt, ResultExt, ensure};
+use snafu::{ensure, OptionExt, ResultExt};
 use sql::statements::statement::Statement;
 
 use crate::error;
@@ -36,26 +36,22 @@ use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME
 use common_error::ext::ErrorExt;
 use common_meta::cluster::NodeInfo;
 use common_meta::datanode::RegionStat;
-use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::flow::flow_state::FlowStat;
+use common_meta::key::flow::FlowMetadataManager;
 use common_meta::kv_backend::KvBackendRef;
 use common_procedure::ProcedureInfo;
 use common_recordbatch::SendableRecordBatchStream;
-use datafusion::error::DataFusionError;
-use datafusion::logical_expr::LogicalPlan;
 use datatypes::schema::SchemaRef;
 use lazy_static::lazy_static;
 use paste::paste;
 use process_list::InformationSchemaProcessList;
-use store_api::sst_entry::{ManifestSstEntry, StorageSstEntry};
 use store_api::storage::{ScanRequest, TableId};
-use table::TableRef;
 use table::metadata::TableType;
+use table::TableRef;
 pub use table_names::*;
 use views::InformationSchemaViews;
 
 use self::columns::InformationSchemaColumns;
-use crate::CatalogManager;
 use crate::error::{Error, Result};
 use crate::process_manager::ProcessManagerRef;
 use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;

@@ -73,6 +69,7 @@ pub(crate) use crate::system_schema::predicate::Predicates;
 use crate::system_schema::{
     SystemSchemaProvider, SystemSchemaProviderInner, SystemTable, SystemTableRef,
 };
+use crate::CatalogManager;
 
 lazy_static! {
     // Memory tables in `information_schema`.
@@ -412,43 +409,8 @@ pub trait InformationExtension {
 
     /// Get the flow statistics. If no flownode is available, return `None`.
     async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error>;
-
-    /// Inspects the datanode.
-    async fn inspect_datanode(
-        &self,
-        request: DatanodeInspectRequest,
-    ) -> std::result::Result<SendableRecordBatchStream, Self::Error>;
 }
 
-/// The request to inspect the datanode.
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct DatanodeInspectRequest {
-    /// Kind to fetch from datanode.
-    pub kind: DatanodeInspectKind,
-
-    /// Pushdown scan configuration (projection/predicate/limit) for the returned stream.
-    /// This allows server-side filtering to reduce I/O and network costs.
-    pub scan: ScanRequest,
-}
-
-/// The kind of the datanode inspect request.
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub enum DatanodeInspectKind {
-    /// List SST entries recorded in manifest
-    SstManifest,
-    /// List SST entries discovered in storage layer
-    SstStorage,
-}
-
-impl DatanodeInspectRequest {
-    /// Builds a logical plan for the datanode inspect request.
-    pub fn build_plan(self) -> std::result::Result<LogicalPlan, DataFusionError> {
-        match self.kind {
-            DatanodeInspectKind::SstManifest => ManifestSstEntry::build_plan(self.scan),
-            DatanodeInspectKind::SstStorage => StorageSstEntry::build_plan(self.scan),
-        }
-    }
-}
-
 pub struct NoopInformationExtension;
 
 #[async_trait::async_trait]

@@ -470,11 +432,4 @@ impl InformationExtension for NoopInformationExtension {
     async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
         Ok(None)
     }
-
-    async fn inspect_datanode(
-        &self,
-        _request: DatanodeInspectRequest,
-    ) -> std::result::Result<SendableRecordBatchStream, Self::Error> {
-        Ok(common_recordbatch::RecordBatches::empty().as_stream())
-    }
 }
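Note: on the v0.17.2 side of this compare, `InformationExtension` ends at `flow_stats`; the datanode-inspection entry point and its request types exist only on the base branch. A hedged sketch of the call-site difference (not runnable standalone; the types come from the hunks above, the function name and the `Error = Error` binding are illustrative assumptions):

async fn inspect(ext: &impl InformationExtension<Error = Error>) -> Result<()> {
    // Present on both sides of the compare:
    let _flow_stats: Option<FlowStat> = ext.flow_stats().await?;

    // Base branch only; deleted on v0.17.2 together with
    // `DatanodeInspectRequest` and `DatanodeInspectKind`:
    // let stream = ext
    //     .inspect_datanode(DatanodeInspectRequest {
    //         kind: DatanodeInspectKind::SstManifest, // or SstStorage
    //         scan: ScanRequest::default(),           // pushdown projection/predicate/limit
    //     })
    //     .await?;
    Ok(())
}
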
@@ -18,46 +18,37 @@ use std::time::Duration;
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_meta::cluster::{DatanodeStatus, NodeInfo, NodeStatus};
+use common_meta::cluster::NodeInfo;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_time::timestamp::Timestamp;
-use common_workload::DatanodeWorkloadType;
 use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
 use datatypes::timestamp::TimestampMillisecond;
 use datatypes::value::Value;
 use datatypes::vectors::{
     Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
-    UInt32VectorBuilder, UInt64VectorBuilder,
 };
-use serde::Serialize;
 use snafu::ResultExt;
 use store_api::storage::{ScanRequest, TableId};
 
-use crate::CatalogManager;
 use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
-use crate::system_schema::information_schema::{CLUSTER_INFO, InformationTable, Predicates};
+use crate::system_schema::information_schema::{InformationTable, Predicates, CLUSTER_INFO};
 use crate::system_schema::utils;
+use crate::CatalogManager;
 
-const PEER_TYPE_FRONTEND: &str = "FRONTEND";
-const PEER_TYPE_METASRV: &str = "METASRV";
-
 const PEER_ID: &str = "peer_id";
 const PEER_TYPE: &str = "peer_type";
 const PEER_ADDR: &str = "peer_addr";
-const CPUS: &str = "cpus";
-const MEMORY_BYTES: &str = "memory_bytes";
 const VERSION: &str = "version";
 const GIT_COMMIT: &str = "git_commit";
 const START_TIME: &str = "start_time";
 const UPTIME: &str = "uptime";
 const ACTIVE_TIME: &str = "active_time";
-const NODE_STATUS: &str = "node_status";
 
 const INIT_CAPACITY: usize = 42;
 

@@ -66,14 +57,11 @@ const INIT_CAPACITY: usize = 42;
 /// - `peer_id`: the peer server id.
 /// - `peer_type`: the peer type, such as `datanode`, `frontend`, `metasrv` etc.
 /// - `peer_addr`: the peer gRPC address.
-/// - `cpus`: the number of CPUs of the peer.
-/// - `memory_bytes`: the memory bytes of the peer.
 /// - `version`: the build package version of the peer.
 /// - `git_commit`: the build git commit hash of the peer.
 /// - `start_time`: the starting time of the peer.
 /// - `uptime`: the uptime of the peer.
 /// - `active_time`: the time since the last activity of the peer.
-/// - `node_status`: the status info of the peer.
 ///
 #[derive(Debug)]
 pub(super) struct InformationSchemaClusterInfo {

@@ -94,8 +82,6 @@ impl InformationSchemaClusterInfo {
             ColumnSchema::new(PEER_ID, ConcreteDataType::int64_datatype(), false),
             ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
             ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
-            ColumnSchema::new(CPUS, ConcreteDataType::uint32_datatype(), false),
-            ColumnSchema::new(MEMORY_BYTES, ConcreteDataType::uint64_datatype(), false),
             ColumnSchema::new(VERSION, ConcreteDataType::string_datatype(), false),
             ColumnSchema::new(GIT_COMMIT, ConcreteDataType::string_datatype(), false),
             ColumnSchema::new(

@@ -105,7 +91,6 @@ impl InformationSchemaClusterInfo {
             ),
             ColumnSchema::new(UPTIME, ConcreteDataType::string_datatype(), true),
             ColumnSchema::new(ACTIVE_TIME, ConcreteDataType::string_datatype(), true),
-            ColumnSchema::new(NODE_STATUS, ConcreteDataType::string_datatype(), true),
         ]))
     }
 

@@ -155,14 +140,11 @@ struct InformationSchemaClusterInfoBuilder {
     peer_ids: Int64VectorBuilder,
     peer_types: StringVectorBuilder,
     peer_addrs: StringVectorBuilder,
-    cpus: UInt32VectorBuilder,
-    memory_bytes: UInt64VectorBuilder,
     versions: StringVectorBuilder,
     git_commits: StringVectorBuilder,
     start_times: TimestampMillisecondVectorBuilder,
     uptimes: StringVectorBuilder,
     active_times: StringVectorBuilder,
-    node_status: StringVectorBuilder,
 }
 
 impl InformationSchemaClusterInfoBuilder {

@@ -173,14 +155,11 @@ impl InformationSchemaClusterInfoBuilder {
             peer_ids: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
             peer_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
-            cpus: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
-            memory_bytes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             versions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             git_commits: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
             uptimes: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             active_times: StringVectorBuilder::with_capacity(INIT_CAPACITY),
-            node_status: StringVectorBuilder::with_capacity(INIT_CAPACITY),
         }
     }
 

@@ -197,10 +176,9 @@ impl InformationSchemaClusterInfoBuilder {
 
     fn add_node_info(&mut self, predicates: &Predicates, node_info: NodeInfo) {
         let peer_type = node_info.status.role_name();
-        let peer_id = peer_id(peer_type, node_info.peer.id);
 
         let row = [
-            (PEER_ID, &Value::from(peer_id)),
+            (PEER_ID, &Value::from(node_info.peer.id)),
             (PEER_TYPE, &Value::from(peer_type)),
             (PEER_ADDR, &Value::from(node_info.peer.addr.as_str())),
             (VERSION, &Value::from(node_info.version.as_str())),

@@ -211,7 +189,13 @@ impl InformationSchemaClusterInfoBuilder {
             return;
         }
 
-        self.peer_ids.push(Some(peer_id));
+        if peer_type == "FRONTEND" || peer_type == "METASRV" {
+            // Always set peer_id to be -1 for frontends and metasrvs
+            self.peer_ids.push(Some(-1));
+        } else {
+            self.peer_ids.push(Some(node_info.peer.id as i64));
+        }
+
         self.peer_types.push(Some(peer_type));
         self.peer_addrs.push(Some(&node_info.peer.addr));
         self.versions.push(Some(&node_info.version));

@@ -228,8 +212,6 @@ impl InformationSchemaClusterInfoBuilder {
             self.start_times.push(None);
             self.uptimes.push(None);
         }
-        self.cpus.push(Some(node_info.cpus));
-        self.memory_bytes.push(Some(node_info.memory_bytes));
 
         if node_info.last_activity_ts > 0 {
             self.active_times.push(Some(

@@ -238,8 +220,6 @@ impl InformationSchemaClusterInfoBuilder {
         } else {
             self.active_times.push(None);
         }
-        self.node_status
-            .push(format_node_status(&node_info).as_deref());
     }
 
     fn format_duration_since(ts: u64) -> String {

@@ -253,14 +233,11 @@ impl InformationSchemaClusterInfoBuilder {
             Arc::new(self.peer_ids.finish()),
             Arc::new(self.peer_types.finish()),
             Arc::new(self.peer_addrs.finish()),
-            Arc::new(self.cpus.finish()),
-            Arc::new(self.memory_bytes.finish()),
             Arc::new(self.versions.finish()),
             Arc::new(self.git_commits.finish()),
             Arc::new(self.start_times.finish()),
             Arc::new(self.uptimes.finish()),
             Arc::new(self.active_times.finish()),
-            Arc::new(self.node_status.finish()),
         ];
         RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
     }

@@ -286,56 +263,3 @@ impl DfPartitionStream for InformationSchemaClusterInfo {
         ))
     }
 }
-
-fn peer_id(peer_type: &str, peer_id: u64) -> i64 {
-    if peer_type == PEER_TYPE_FRONTEND || peer_type == PEER_TYPE_METASRV {
-        -1
-    } else {
-        peer_id as i64
-    }
-}
-
-#[derive(Serialize)]
-struct DisplayMetasrvStatus {
-    is_leader: bool,
-}
-
-#[derive(Serialize)]
-struct DisplayDatanodeStatus {
-    workloads: Vec<DatanodeWorkloadType>,
-    leader_regions: usize,
-    follower_regions: usize,
-}
-
-impl From<&DatanodeStatus> for DisplayDatanodeStatus {
-    fn from(status: &DatanodeStatus) -> Self {
-        Self {
-            workloads: status
-                .workloads
-                .types
-                .iter()
-                .flat_map(|w| DatanodeWorkloadType::from_i32(*w))
-                .collect(),
-            leader_regions: status.leader_regions,
-            follower_regions: status.follower_regions,
-        }
-    }
-}
-
-fn format_node_status(node_info: &NodeInfo) -> Option<String> {
-    match &node_info.status {
-        NodeStatus::Datanode(datanode_status) => {
-            serde_json::to_string(&DisplayDatanodeStatus::from(datanode_status)).ok()
-        }
-        NodeStatus::Frontend(_) => None,
-        NodeStatus::Flownode(_) => None,
-        NodeStatus::Metasrv(metasrv_status) => {
-            if metasrv_status.is_leader {
-                serde_json::to_string(&DisplayMetasrvStatus { is_leader: true }).ok()
-            } else {
-                None
-            }
-        }
-        NodeStatus::Standalone => None,
-    }
-}
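Note: the `cpus`, `memory_bytes`, and `node_status` columns of `cluster_info` exist only on the base branch; queries projecting them will not resolve on v0.17.2. Both sides do encode the same peer-id rule (frontends and metasrvs report -1): the base branch factors it into the `peer_id` helper driven by named constants, while v0.17.2 inlines the comparison at the single push site. A standalone equivalence sketch (not library code):

// Equivalence sketch for the two peer_id encodings above.
const PEER_TYPE_FRONTEND: &str = "FRONTEND";
const PEER_TYPE_METASRV: &str = "METASRV";

/// Base-branch shape: the rule lives in a helper.
fn peer_id_helper(peer_type: &str, peer_id: u64) -> i64 {
    if peer_type == PEER_TYPE_FRONTEND || peer_type == PEER_TYPE_METASRV {
        // Frontends and metasrvs have no stable server id, so report -1.
        -1
    } else {
        peer_id as i64
    }
}

/// v0.17.2 shape: the same rule inlined at the push site.
fn peer_id_inline(peer_type: &str, peer_id: u64) -> i64 {
    if peer_type == "FRONTEND" || peer_type == "METASRV" {
        -1
    } else {
        peer_id as i64
    }
}

fn main() {
    assert_eq!(peer_id_helper("FRONTEND", 7), peer_id_inline("FRONTEND", 7));
    assert_eq!(peer_id_helper("DATANODE", 7), peer_id_inline("DATANODE", 7));
}
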
@@ -23,9 +23,9 @@ use common_error::ext::BoxedError;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, DataType, MutableVector};
 use datatypes::scalars::ScalarVectorBuilder;
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};

@@ -38,12 +38,12 @@ use snafu::{OptionExt, ResultExt};
 use sql::statements;
 use store_api::storage::{ScanRequest, TableId};
 
-use crate::CatalogManager;
 use crate::error::{
     CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
 };
 use crate::information_schema::Predicates;
-use crate::system_schema::information_schema::{COLUMNS, InformationTable};
+use crate::system_schema::information_schema::{InformationTable, COLUMNS};
+use crate::CatalogManager;
 
 #[derive(Debug)]
 pub(super) struct InformationSchemaColumns {

@@ -16,10 +16,10 @@ use std::sync::{Arc, Weak};
 
 use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_meta::key::FlowId;
-use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::flow::flow_info::FlowInfoValue;
 use common_meta::key::flow::flow_state::FlowStat;
+use common_meta::key::flow::FlowMetadataManager;
+use common_meta::key::FlowId;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
 use datafusion::execution::TaskContext;

@@ -38,14 +38,14 @@ use futures::TryStreamExt;
 use snafu::{OptionExt, ResultExt};
 use store_api::storage::{ScanRequest, TableId};
 
-use crate::CatalogManager;
 use crate::error::{
     CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu,
     Result, UpgradeWeakCatalogManagerRefSnafu,
 };
-use crate::information_schema::{FLOWS, Predicates};
+use crate::information_schema::{Predicates, FLOWS};
 use crate::system_schema::information_schema::InformationTable;
 use crate::system_schema::utils;
+use crate::CatalogManager;
 
 const INIT_CAPACITY: usize = 42;
 
@@ -89,9 +89,9 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
             vec![
                 Arc::new(StringVector::from(vec![build_info.branch.to_string()])),
                 Arc::new(StringVector::from(vec![build_info.commit.to_string()])),
-                Arc::new(StringVector::from(vec![
-                    build_info.commit_short.to_string(),
-                ])),
+                Arc::new(StringVector::from(vec![build_info
+                    .commit_short
+                    .to_string()])),
                 Arc::new(StringVector::from(vec![build_info.clean.to_string()])),
                 Arc::new(StringVector::from(vec![build_info.version.to_string()])),
             ],

@@ -369,9 +369,17 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
         TRIGGERS => (
             vec![
                 string_column("TRIGGER_NAME"),
-                ColumnSchema::new("trigger_id", ConcreteDataType::uint64_datatype(), false),
+                ColumnSchema::new(
+                    "trigger_id",
+                    ConcreteDataType::uint64_datatype(),
+                    false,
+                ),
                 string_column("TRIGGER_DEFINITION"),
-                ColumnSchema::new("flownode_id", ConcreteDataType::uint64_datatype(), true),
+                ColumnSchema::new(
+                    "flownode_id",
+                    ConcreteDataType::uint64_datatype(),
+                    true,
+                ),
             ],
             vec![],
         ),
@@ -20,9 +20,9 @@ use common_error::ext::BoxedError;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder, VectorRef};
 use datatypes::schema::{ColumnSchema, FulltextBackend, Schema, SchemaRef};
 use datatypes::value::Value;

@@ -31,11 +31,11 @@ use futures_util::TryStreamExt;
 use snafu::{OptionExt, ResultExt};
 use store_api::storage::{ScanRequest, TableId};
 
-use crate::CatalogManager;
 use crate::error::{
     CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
 };
-use crate::system_schema::information_schema::{InformationTable, KEY_COLUMN_USAGE, Predicates};
+use crate::system_schema::information_schema::{InformationTable, Predicates, KEY_COLUMN_USAGE};
+use crate::CatalogManager;
 
 pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";
 pub const CONSTRAINT_NAME: &str = "constraint_name";

@@ -277,15 +277,15 @@ impl InformationSchemaKeyColumnUsageBuilder {
                     constraints.push(CONSTRAINT_NAME_INVERTED_INDEX);
                     greptime_index_type.push(INDEX_TYPE_INVERTED_INDEX);
                 }
-                if let Ok(Some(options)) = column.fulltext_options()
-                    && options.enable
-                {
-                    constraints.push(CONSTRAINT_NAME_FULLTEXT_INDEX);
-                    let index_type = match options.backend {
-                        FulltextBackend::Bloom => INDEX_TYPE_FULLTEXT_BLOOM,
-                        FulltextBackend::Tantivy => INDEX_TYPE_FULLTEXT_TANTIVY,
-                    };
-                    greptime_index_type.push(index_type);
-                }
+                if let Ok(Some(options)) = column.fulltext_options() {
+                    if options.enable {
+                        constraints.push(CONSTRAINT_NAME_FULLTEXT_INDEX);
+                        let index_type = match options.backend {
+                            FulltextBackend::Bloom => INDEX_TYPE_FULLTEXT_BLOOM,
+                            FulltextBackend::Tantivy => INDEX_TYPE_FULLTEXT_TANTIVY,
+                        };
+                        greptime_index_type.push(index_type);
+                    }
+                }
                 if column.is_skipping_indexed() {
                     constraints.push(CONSTRAINT_NAME_SKIPPING_INDEX);
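Note: the last hunk above changes syntax only. The base branch uses a let-chain (`if let ... && cond`), which requires the Rust 2024 edition, while v0.17.2 spells the same condition as a nested `if`. A standalone equivalence sketch (the `Options` type below is a stand-in, not the crate's `FulltextOptions`; the first function needs edition 2024 to compile):

struct Options {
    enable: bool,
}

fn let_chain_shape(options: Option<Options>) -> bool {
    // Base branch: let-chain form.
    if let Some(options) = options
        && options.enable
    {
        return true;
    }
    false
}

fn nested_if_shape(options: Option<Options>) -> bool {
    // v0.17.2: the pre-2024 spelling of the same condition.
    if let Some(options) = options {
        if options.enable {
            return true;
        }
    }
    false
}

fn main() {
    assert_eq!(
        let_chain_shape(Some(Options { enable: true })),
        nested_if_shape(Some(Options { enable: true }))
    );
}
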
@@ -21,17 +21,16 @@ use common_error::ext::BoxedError;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
-use datatypes::timestamp::TimestampMicrosecond;
+use datatypes::timestamp::TimestampSecond;
 use datatypes::value::Value;
 use datatypes::vectors::{
     ConstantVector, Int64Vector, Int64VectorBuilder, MutableVector, StringVector,
-    StringVectorBuilder, TimestampMicrosecondVector, TimestampMicrosecondVectorBuilder,
-    UInt64VectorBuilder,
+    StringVectorBuilder, TimestampSecondVector, TimestampSecondVectorBuilder, UInt64VectorBuilder,
 };
 use futures::{StreamExt, TryStreamExt};
 use partition::manager::PartitionInfo;

@@ -39,13 +38,13 @@ use snafu::{OptionExt, ResultExt};
 use store_api::storage::{ScanRequest, TableId};
 use table::metadata::{TableInfo, TableType};
 
-use crate::CatalogManager;
 use crate::error::{
     CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, PartitionManagerNotFoundSnafu,
     Result, UpgradeWeakCatalogManagerRefSnafu,
 };
 use crate::kvbackend::KvBackendCatalogManager;
-use crate::system_schema::information_schema::{InformationTable, PARTITIONS, Predicates};
+use crate::system_schema::information_schema::{InformationTable, Predicates, PARTITIONS};
+use crate::CatalogManager;
 
 const TABLE_CATALOG: &str = "table_catalog";
 const TABLE_SCHEMA: &str = "table_schema";

@@ -129,17 +128,17 @@ impl InformationSchemaPartitions {
             ColumnSchema::new("data_free", ConcreteDataType::int64_datatype(), true),
             ColumnSchema::new(
                 "create_time",
-                ConcreteDataType::timestamp_microsecond_datatype(),
+                ConcreteDataType::timestamp_second_datatype(),
                 true,
             ),
             ColumnSchema::new(
                 "update_time",
-                ConcreteDataType::timestamp_microsecond_datatype(),
+                ConcreteDataType::timestamp_second_datatype(),
                 true,
             ),
             ColumnSchema::new(
                 "check_time",
-                ConcreteDataType::timestamp_microsecond_datatype(),
+                ConcreteDataType::timestamp_second_datatype(),
                 true,
             ),
             ColumnSchema::new("checksum", ConcreteDataType::int64_datatype(), true),

@@ -212,7 +211,7 @@ struct InformationSchemaPartitionsBuilder {
     partition_names: StringVectorBuilder,
     partition_ordinal_positions: Int64VectorBuilder,
     partition_expressions: StringVectorBuilder,
-    create_times: TimestampMicrosecondVectorBuilder,
+    create_times: TimestampSecondVectorBuilder,
     partition_ids: UInt64VectorBuilder,
 }
 

@@ -232,7 +231,7 @@ impl InformationSchemaPartitionsBuilder {
             partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
             partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
-            create_times: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
+            create_times: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
             partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
         }
     }

@@ -331,8 +330,8 @@ impl InformationSchemaPartitionsBuilder {
             .push(Some((index + 1) as i64));
         let expression = partition.partition_expr.as_ref().map(|e| e.to_string());
         self.partition_expressions.push(expression.as_deref());
-        self.create_times.push(Some(TimestampMicrosecond::from(
-            table_info.meta.created_on.timestamp_millis(),
+        self.create_times.push(Some(TimestampSecond::from(
+            table_info.meta.created_on.timestamp(),
         )));
         self.partition_ids.push(Some(partition.id.as_u64()));
     }

@@ -349,8 +348,8 @@ impl InformationSchemaPartitionsBuilder {
             Arc::new(Int64Vector::from(vec![None])),
             rows_num,
         ));
-        let null_timestampmicrosecond_vector = Arc::new(ConstantVector::new(
-            Arc::new(TimestampMicrosecondVector::from(vec![None])),
+        let null_timestamp_second_vector = Arc::new(ConstantVector::new(
+            Arc::new(TimestampSecondVector::from(vec![None])),
             rows_num,
         ));
         let partition_methods = Arc::new(ConstantVector::new(

@@ -380,8 +379,8 @@ impl InformationSchemaPartitionsBuilder {
             null_i64_vector.clone(),
             Arc::new(self.create_times.finish()),
             // TODO(dennis): supports update_time
-            null_timestampmicrosecond_vector.clone(),
-            null_timestampmicrosecond_vector,
+            null_timestamp_second_vector.clone(),
+            null_timestamp_second_vector,
             null_i64_vector,
             null_string_vector.clone(),
             null_string_vector.clone(),
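Note: on the v0.17.2 side the partitions table stores `create_time` (and the null `update_time`/`check_time` vectors) at second resolution, taking `created_on.timestamp()`, while the base branch declares microsecond-typed columns and feeds them `created_on.timestamp_millis()`; whether a millisecond value in a microsecond column is intended is not visible in this diff. A runnable sketch of the raw values involved (assumes the `chrono` crate, matching the `created_on` accessors used above):

use chrono::{TimeZone, Utc};

fn main() {
    let created_on = Utc.with_ymd_and_hms(2024, 5, 1, 12, 30, 45).unwrap();
    // Fed into the microsecond-typed column on the base branch:
    let millis = created_on.timestamp_millis(); // 1_714_566_645_000
    // Fed into the second-typed column on v0.17.2:
    let secs = created_on.timestamp(); // 1_714_566_645
    assert_eq!(millis, secs * 1000);
}
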
@@ -22,9 +22,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_time::timestamp::Timestamp;
 use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
 use datatypes::timestamp::TimestampMillisecond;

@@ -33,10 +33,10 @@ use datatypes::vectors::{StringVectorBuilder, TimestampMillisecondVectorBuilder}
 use snafu::ResultExt;
 use store_api::storage::{ScanRequest, TableId};
 
-use crate::CatalogManager;
 use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
-use crate::system_schema::information_schema::{InformationTable, PROCEDURE_INFO, Predicates};
+use crate::system_schema::information_schema::{InformationTable, Predicates, PROCEDURE_INFO};
 use crate::system_schema::utils;
+use crate::CatalogManager;
 
 const PROCEDURE_ID: &str = "procedure_id";
 const PROCEDURE_TYPE: &str = "procedure_type";

@@ -23,9 +23,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use datafusion::common::HashMap;
 use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
 use datatypes::value::Value;
@@ -35,13 +35,13 @@ use snafu::{OptionExt, ResultExt};
|
|||||||
use store_api::storage::{RegionId, ScanRequest, TableId};
|
use store_api::storage::{RegionId, ScanRequest, TableId};
|
||||||
use table::metadata::TableType;
|
use table::metadata::TableType;
|
||||||
|
|
||||||
use crate::CatalogManager;
|
|
||||||
use crate::error::{
|
use crate::error::{
|
||||||
CreateRecordBatchSnafu, FindRegionRoutesSnafu, InternalSnafu, Result,
|
CreateRecordBatchSnafu, FindRegionRoutesSnafu, InternalSnafu, Result,
|
||||||
UpgradeWeakCatalogManagerRefSnafu,
|
UpgradeWeakCatalogManagerRefSnafu,
|
||||||
};
|
};
|
||||||
use crate::kvbackend::KvBackendCatalogManager;
|
use crate::kvbackend::KvBackendCatalogManager;
|
||||||
use crate::system_schema::information_schema::{InformationTable, Predicates, REGION_PEERS};
|
use crate::system_schema::information_schema::{InformationTable, Predicates, REGION_PEERS};
|
||||||
|
use crate::CatalogManager;
|
||||||
|
|
||||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||||
pub const TABLE_SCHEMA: &str = "table_schema";
|
pub const TABLE_SCHEMA: &str = "table_schema";
|
||||||
|
|||||||
@@ -30,17 +30,16 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, UInt64VectorB
 use snafu::ResultExt;
 use store_api::storage::{ScanRequest, TableId};

-use crate::CatalogManager;
 use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
 use crate::information_schema::Predicates;
 use crate::system_schema::information_schema::{InformationTable, REGION_STATISTICS};
 use crate::system_schema::utils;
+use crate::CatalogManager;

 const REGION_ID: &str = "region_id";
 const TABLE_ID: &str = "table_id";
 const REGION_NUMBER: &str = "region_number";
 const REGION_ROWS: &str = "region_rows";
-const WRITTEN_BYTES: &str = "written_bytes_since_open";
 const DISK_SIZE: &str = "disk_size";
 const MEMTABLE_SIZE: &str = "memtable_size";
 const MANIFEST_SIZE: &str = "manifest_size";
@@ -58,7 +57,6 @@ const INIT_CAPACITY: usize = 42;
 /// - `table_id`: The table id.
 /// - `region_number`: The region number.
 /// - `region_rows`: The number of rows in region.
-/// - `written_bytes_since_open`: The total bytes written of the region since region opened.
 /// - `memtable_size`: The memtable size in bytes.
 /// - `disk_size`: The approximate disk size in bytes.
 /// - `manifest_size`: The manifest size in bytes.
@@ -86,7 +84,6 @@ impl InformationSchemaRegionStatistics {
             ColumnSchema::new(TABLE_ID, ConcreteDataType::uint32_datatype(), false),
             ColumnSchema::new(REGION_NUMBER, ConcreteDataType::uint32_datatype(), false),
             ColumnSchema::new(REGION_ROWS, ConcreteDataType::uint64_datatype(), true),
-            ColumnSchema::new(WRITTEN_BYTES, ConcreteDataType::uint64_datatype(), true),
             ColumnSchema::new(DISK_SIZE, ConcreteDataType::uint64_datatype(), true),
             ColumnSchema::new(MEMTABLE_SIZE, ConcreteDataType::uint64_datatype(), true),
             ColumnSchema::new(MANIFEST_SIZE, ConcreteDataType::uint64_datatype(), true),
@@ -150,7 +147,6 @@ struct InformationSchemaRegionStatisticsBuilder {
     table_ids: UInt32VectorBuilder,
     region_numbers: UInt32VectorBuilder,
     region_rows: UInt64VectorBuilder,
-    written_bytes: UInt64VectorBuilder,
     disk_sizes: UInt64VectorBuilder,
     memtable_sizes: UInt64VectorBuilder,
     manifest_sizes: UInt64VectorBuilder,
@@ -170,7 +166,6 @@ impl InformationSchemaRegionStatisticsBuilder {
             table_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
             region_numbers: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
             region_rows: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
-            written_bytes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             disk_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             memtable_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             manifest_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
@@ -202,7 +197,6 @@ impl InformationSchemaRegionStatisticsBuilder {
             (TABLE_ID, &Value::from(region_stat.id.table_id())),
             (REGION_NUMBER, &Value::from(region_stat.id.region_number())),
             (REGION_ROWS, &Value::from(region_stat.num_rows)),
-            (WRITTEN_BYTES, &Value::from(region_stat.written_bytes)),
             (DISK_SIZE, &Value::from(region_stat.approximate_bytes)),
             (MEMTABLE_SIZE, &Value::from(region_stat.memtable_size)),
             (MANIFEST_SIZE, &Value::from(region_stat.manifest_size)),
@@ -222,7 +216,6 @@ impl InformationSchemaRegionStatisticsBuilder {
         self.region_numbers
             .push(Some(region_stat.id.region_number()));
         self.region_rows.push(Some(region_stat.num_rows));
-        self.written_bytes.push(Some(region_stat.written_bytes));
         self.disk_sizes.push(Some(region_stat.approximate_bytes));
         self.memtable_sizes.push(Some(region_stat.memtable_size));
         self.manifest_sizes.push(Some(region_stat.manifest_size));
@@ -239,7 +232,6 @@ impl InformationSchemaRegionStatisticsBuilder {
             Arc::new(self.table_ids.finish()),
             Arc::new(self.region_numbers.finish()),
             Arc::new(self.region_rows.finish()),
-            Arc::new(self.written_bytes.finish()),
             Arc::new(self.disk_sizes.finish()),
             Arc::new(self.memtable_sizes.finish()),
             Arc::new(self.manifest_sizes.finish()),

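Beyond import order, the hunks above make one substantive change: they drop the `written_bytes_since_open` column from `information_schema.region_statistics`. Note how the removal has to land at every stage of the column pipeline in lockstep: the name constant, the doc comment, the `ColumnSchema`, the builder field, its `with_capacity` initialization, the per-row predicate entry, the per-row `push`, and the final `finish()`. A minimal self-contained sketch of that pipeline (plain `Vec`s stand in for the crate's `UInt64VectorBuilder`; the names here are illustrative):

```rust
// Plain Vec<Option<u64>> stands in for UInt64VectorBuilder; `finish()` stands
// in for producing an Arrow column. Removing a column means deleting one line
// at each numbered step, which is exactly the shape of the hunks above.
struct RegionStat {
    num_rows: u64,
    approximate_bytes: u64,
}

struct Builder {
    region_rows: Vec<Option<u64>>, // 1. one builder field per column
    disk_sizes: Vec<Option<u64>>,
}

impl Builder {
    fn with_capacity(cap: usize) -> Self {
        Self {
            region_rows: Vec::with_capacity(cap), // 2. pre-sized (INIT_CAPACITY)
            disk_sizes: Vec::with_capacity(cap),
        }
    }

    fn add_region_stat(&mut self, stat: &RegionStat) {
        self.region_rows.push(Some(stat.num_rows)); // 3. one push per column
        self.disk_sizes.push(Some(stat.approximate_bytes));
    }

    fn finish(self) -> Vec<Vec<Option<u64>>> {
        vec![self.region_rows, self.disk_sizes] // 4. one finished vector each
    }
}

fn main() {
    let mut b = Builder::with_capacity(42);
    b.add_region_stat(&RegionStat { num_rows: 10, approximate_bytes: 4096 });
    assert_eq!(b.finish(), vec![vec![Some(10)], vec![Some(4096)]]);
}
```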
@@ -21,9 +21,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_time::util::current_time_millis;
 use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, MutableVector};
 use datatypes::scalars::ScalarVectorBuilder;
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};

@@ -21,9 +21,9 @@ use common_meta::key::schema_name::SchemaNameKey;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
 use datatypes::value::Value;
@@ -31,13 +31,13 @@ use datatypes::vectors::StringVectorBuilder;
 use snafu::{OptionExt, ResultExt};
 use store_api::storage::{ScanRequest, TableId};

-use crate::CatalogManager;
 use crate::error::{
     CreateRecordBatchSnafu, InternalSnafu, Result, TableMetadataManagerSnafu,
     UpgradeWeakCatalogManagerRefSnafu,
 };
 use crate::system_schema::information_schema::{InformationTable, Predicates, SCHEMATA};
 use crate::system_schema::utils;
+use crate::CatalogManager;

 pub const CATALOG_NAME: &str = "catalog_name";
 pub const SCHEMA_NAME: &str = "schema_name";

@@ -20,9 +20,9 @@ use common_error::ext::BoxedError;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, MutableVector};
 use datatypes::scalars::ScalarVectorBuilder;
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
@@ -32,15 +32,15 @@ use futures::TryStreamExt;
 use snafu::{OptionExt, ResultExt};
 use store_api::storage::{ScanRequest, TableId};

-use crate::CatalogManager;
 use crate::error::{
     CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
 };
-use crate::information_schema::Predicates;
 use crate::information_schema::key_column_usage::{
     CONSTRAINT_NAME_PRI, CONSTRAINT_NAME_TIME_INDEX,
 };
+use crate::information_schema::Predicates;
 use crate::system_schema::information_schema::{InformationTable, TABLE_CONSTRAINTS};
+use crate::CatalogManager;

 /// The `TABLE_CONSTRAINTS` table describes which tables have constraints.
 #[derive(Debug)]

@@ -23,9 +23,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_telemetry::error;
 use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
 use datatypes::value::Value;
@@ -37,12 +37,12 @@ use snafu::{OptionExt, ResultExt};
 use store_api::storage::{RegionId, ScanRequest, TableId};
 use table::metadata::{TableInfo, TableType};

-use crate::CatalogManager;
 use crate::error::{
     CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
 };
 use crate::system_schema::information_schema::{InformationTable, Predicates, TABLES};
 use crate::system_schema::utils;
+use crate::CatalogManager;

 pub const TABLE_CATALOG: &str = "table_catalog";
 pub const TABLE_SCHEMA: &str = "table_schema";

@@ -20,9 +20,9 @@ use common_error::ext::BoxedError;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
 use datatypes::value::Value;
@@ -32,13 +32,13 @@ use snafu::{OptionExt, ResultExt};
 use store_api::storage::{ScanRequest, TableId};
 use table::metadata::TableType;

-use crate::CatalogManager;
 use crate::error::{
     CastManagerSnafu, CreateRecordBatchSnafu, GetViewCacheSnafu, InternalSnafu, Result,
     UpgradeWeakCatalogManagerRefSnafu, ViewInfoNotFoundSnafu,
 };
 use crate::kvbackend::KvBackendCatalogManager;
 use crate::system_schema::information_schema::{InformationTable, Predicates, VIEWS};
+use crate::CatalogManager;
 const INIT_CAPACITY: usize = 42;

 pub const TABLE_CATALOG: &str = "table_catalog";

@@ -21,9 +21,9 @@ use common_error::ext::BoxedError;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::schema::SchemaRef;
 use datatypes::vectors::VectorRef;
 use snafu::ResultExt;
@@ -34,10 +34,10 @@ use table::TableRef;
 pub use table_names::*;

 use self::pg_namespace::oid_map::{PGNamespaceOidMap, PGNamespaceOidMapRef};
-use crate::CatalogManager;
 use crate::system_schema::memory_table::MemoryTable;
 use crate::system_schema::utils::tables::u32_column;
 use crate::system_schema::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef};
+use crate::CatalogManager;

 lazy_static! {
     static ref MEMORY_TABLES: &'static [&'static str] = &[table_names::PG_TYPE];

@@ -32,15 +32,15 @@ use snafu::{OptionExt, ResultExt};
 use store_api::storage::ScanRequest;
 use table::metadata::TableType;

-use crate::CatalogManager;
 use crate::error::{
     CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
 };
 use crate::information_schema::Predicates;
-use crate::system_schema::SystemTable;
 use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef;
-use crate::system_schema::pg_catalog::{OID_COLUMN_NAME, PG_CLASS, query_ctx};
+use crate::system_schema::pg_catalog::{query_ctx, OID_COLUMN_NAME, PG_CLASS};
 use crate::system_schema::utils::tables::{string_column, u32_column};
+use crate::system_schema::SystemTable;
+use crate::CatalogManager;

 // === column name ===
 pub const RELNAME: &str = "relname";

@@ -29,15 +29,15 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
 use snafu::{OptionExt, ResultExt};
 use store_api::storage::ScanRequest;

-use crate::CatalogManager;
 use crate::error::{
     CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
 };
 use crate::information_schema::Predicates;
-use crate::system_schema::SystemTable;
 use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef;
-use crate::system_schema::pg_catalog::{OID_COLUMN_NAME, PG_DATABASE, query_ctx};
+use crate::system_schema::pg_catalog::{query_ctx, OID_COLUMN_NAME, PG_DATABASE};
 use crate::system_schema::utils::tables::{string_column, u32_column};
+use crate::system_schema::SystemTable;
+use crate::CatalogManager;

 // === column name ===
 pub const DATNAME: &str = "datname";

@@ -35,16 +35,16 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
 use snafu::{OptionExt, ResultExt};
 use store_api::storage::ScanRequest;

-use crate::CatalogManager;
 use crate::error::{
     CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
 };
 use crate::information_schema::Predicates;
-use crate::system_schema::SystemTable;
 use crate::system_schema::pg_catalog::{
-    OID_COLUMN_NAME, PG_NAMESPACE, PGNamespaceOidMapRef, query_ctx,
+    query_ctx, PGNamespaceOidMapRef, OID_COLUMN_NAME, PG_NAMESPACE,
 };
 use crate::system_schema::utils::tables::{string_column, u32_column};
+use crate::system_schema::SystemTable;
+use crate::CatalogManager;

 const NSPNAME: &str = "nspname";
 const INIT_CAPACITY: usize = 42;

@@ -339,22 +339,18 @@ mod tests {
         assert!(!p.eval(&wrong_row).unwrap());
         assert!(p.eval(&[]).is_none());
         assert!(p.eval(&[("c", &a_value)]).is_none());
-        assert!(
-            !p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
-                .unwrap()
-        );
-        assert!(
-            !p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
-                .unwrap()
-        );
-        assert!(
-            p.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
-                .is_none()
-        );
-        assert!(
-            !p.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
-                .unwrap()
-        );
+        assert!(!p
+            .eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
+            .unwrap());
+        assert!(!p
+            .eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
+            .unwrap());
+        assert!(p
+            .eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
+            .is_none());
+        assert!(!p
+            .eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
+            .unwrap());

         //Predicate::Or
         let p = Predicate::Or(Box::new(p1), Box::new(p2));
@@ -362,22 +358,18 @@ mod tests {
         assert!(p.eval(&wrong_row).unwrap());
         assert!(p.eval(&[]).is_none());
         assert!(p.eval(&[("c", &a_value)]).is_none());
-        assert!(
-            !p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
-                .unwrap()
-        );
-        assert!(
-            p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
-                .unwrap()
-        );
-        assert!(
-            p.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
-                .unwrap()
-        );
-        assert!(
-            p.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
-                .is_none()
-        );
+        assert!(!p
+            .eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
+            .unwrap());
+        assert!(p
+            .eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
+            .unwrap());
+        assert!(p
+            .eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
+            .unwrap());
+        assert!(p
+            .eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
+            .is_none());
     }

     #[test]

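The two test hunks above only reflow `assert!` chains, but the assertions they carry document the semantics of `Predicate::eval`: it returns `Option<bool>`, where `None` means a referenced column was absent from the row, and `And`/`Or` combine results as Kleene three-valued logic. A minimal sketch of that logic, inferred from the assertions rather than taken from the crate's actual implementation:

```rust
#[derive(Clone, Copy)]
enum Pred {
    Lit(bool), // stand-in for a column comparison that resolved
    Unknown,   // stand-in for a comparison on a missing column
}

fn eval(p: Pred) -> Option<bool> {
    match p {
        Pred::Lit(b) => Some(b),
        Pred::Unknown => None,
    }
}

// Kleene three-valued AND/OR, consistent with the assertions above:
// `false AND unknown` is false, `true OR unknown` is true, and otherwise
// an unknown operand makes the result unknown.
fn and(a: Option<bool>, b: Option<bool>) -> Option<bool> {
    match (a, b) {
        (Some(false), _) | (_, Some(false)) => Some(false),
        (Some(true), Some(true)) => Some(true),
        _ => None,
    }
}

fn or(a: Option<bool>, b: Option<bool>) -> Option<bool> {
    match (a, b) {
        (Some(true), _) | (_, Some(true)) => Some(true),
        (Some(false), Some(false)) => Some(false),
        _ => None,
    }
}

fn main() {
    let (t, f, u) = (eval(Pred::Lit(true)), eval(Pred::Lit(false)), eval(Pred::Unknown));
    assert_eq!(and(f, u), Some(false)); // mirrors: assert!(!p.eval(...).unwrap())
    assert_eq!(and(t, u), None);        // mirrors: assert!(p.eval(...).is_none())
    assert_eq!(or(t, u), Some(true));   // mirrors: assert!(p.eval(...).unwrap())
    assert_eq!(or(f, u), None);         // mirrors: assert!(p.eval(...).is_none())
}
```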
@@ -17,10 +17,10 @@ use std::sync::Weak;
 use common_meta::key::TableMetadataManagerRef;
 use snafu::OptionExt;

-use crate::CatalogManager;
 use crate::error::{GetInformationExtensionSnafu, Result, UpgradeWeakCatalogManagerRefSnafu};
 use crate::information_schema::InformationExtensionRef;
 use crate::kvbackend::KvBackendCatalogManager;
+use crate::CatalogManager;

 pub mod tables;


@@ -17,27 +17,27 @@ use std::sync::Arc;

 use bytes::Bytes;
 use common_catalog::format_full_table_name;
-use common_query::logical_plan::{SubstraitPlanDecoderRef, rename_logical_plan_columns};
+use common_query::logical_plan::{rename_logical_plan_columns, SubstraitPlanDecoderRef};
 use datafusion::common::{ResolvedTableReference, TableReference};
 use datafusion::datasource::view::ViewTable;
-use datafusion::datasource::{TableProvider, provider_as_source};
+use datafusion::datasource::{provider_as_source, TableProvider};
 use datafusion::logical_expr::TableSource;
 use itertools::Itertools;
 use session::context::QueryContextRef;
-use snafu::{OptionExt, ResultExt, ensure};
+use snafu::{ensure, OptionExt, ResultExt};
 use table::metadata::TableType;
 use table::table::adapter::DfTableProviderAdapter;
 pub mod dummy_catalog;
 use dummy_catalog::DummyCatalogList;
 use table::TableRef;

-use crate::CatalogManagerRef;
 use crate::error::{
     CastManagerSnafu, DecodePlanSnafu, GetViewCacheSnafu, ProjectViewColumnsSnafu,
     QueryAccessDeniedSnafu, Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
     ViewPlanColumnsChangedSnafu,
 };
 use crate::kvbackend::KvBackendCatalogManager;
+use crate::CatalogManagerRef;

 pub struct DfTableSourceProvider {
     catalog_manager: CatalogManagerRef,
@@ -272,7 +272,7 @@ mod tests {
     use common_query::logical_plan::SubstraitPlanDecoder;
     use datafusion::catalog::CatalogProviderList;
     use datafusion::logical_expr::builder::LogicalTableSource;
-    use datafusion::logical_expr::{LogicalPlan, LogicalPlanBuilder, col, lit};
+    use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder};

     use crate::information_schema::NoopInformationExtension;


@@ -25,8 +25,8 @@ use datafusion::datasource::TableProvider;
 use snafu::OptionExt;
 use table::table::adapter::DfTableProviderAdapter;

-use crate::CatalogManagerRef;
 use crate::error::TableNotExistSnafu;
+use crate::CatalogManagerRef;

 /// Delegate the resolving requests to the `[CatalogManager]` unconditionally.
 #[derive(Clone)]

@@ -14,8 +14,8 @@

 use std::time::Instant;

-use common_meta::key::TableMetadataManagerRef;
 use common_meta::key::table_route::TableRouteValue;
+use common_meta::key::TableMetadataManagerRef;
 use table::table_name::TableName;

 use crate::bench::{

@@ -18,9 +18,9 @@ mod import;
 use clap::Subcommand;
 use common_error::ext::BoxedError;

-use crate::Tool;
 use crate::data::export::ExportCommand;
 use crate::data::import::ImportCommand;
+use crate::Tool;

 /// Command for data operations including exporting data from and importing data into GreptimeDB.
 #[derive(Subcommand)]

@@ -24,18 +24,18 @@ use common_error::ext::BoxedError;
 use common_telemetry::{debug, error, info};
 use object_store::layers::LoggingLayer;
 use object_store::services::Oss;
-use object_store::{ObjectStore, services};
+use object_store::{services, ObjectStore};
 use serde_json::Value;
 use snafu::{OptionExt, ResultExt};
 use tokio::sync::Semaphore;
 use tokio::time::Instant;

-use crate::database::{DatabaseClient, parse_proxy_opts};
+use crate::database::{parse_proxy_opts, DatabaseClient};
 use crate::error::{
     EmptyResultSnafu, Error, OpenDalSnafu, OutputDirNotSetSnafu, Result, S3ConfigNotSetSnafu,
     SchemaNotFoundSnafu,
 };
-use crate::{Tool, database};
+use crate::{database, Tool};

 type TableReference = (String, String, String);


@@ -25,9 +25,9 @@ use snafu::{OptionExt, ResultExt};
 use tokio::sync::Semaphore;
 use tokio::time::Instant;

-use crate::database::{DatabaseClient, parse_proxy_opts};
+use crate::database::{parse_proxy_opts, DatabaseClient};
 use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
-use crate::{Tool, database};
+use crate::{database, Tool};

 #[derive(Debug, Default, Clone, ValueEnum)]
 enum ImportTarget {

@@ -14,15 +14,15 @@

 use std::time::Duration;

-use base64::Engine;
 use base64::engine::general_purpose;
+use base64::Engine;
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_error::ext::BoxedError;
 use humantime::format_duration;
 use serde_json::Value;
-use servers::http::GreptimeQueryOutput;
 use servers::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT;
 use servers::http::result::greptime_result_v1::GreptimedbV1Response;
+use servers::http::GreptimeQueryOutput;
 use snafu::ResultExt;

 use crate::error::{

@@ -21,10 +21,10 @@ mod utils;
 use clap::Subcommand;
 use common_error::ext::BoxedError;

-use crate::Tool;
 use crate::metadata::control::{DelCommand, GetCommand};
 use crate::metadata::repair::RepairLogicalTablesCommand;
 use crate::metadata::snapshot::SnapshotCommand;
+use crate::Tool;

 /// Command for managing metadata operations,
 /// including saving and restoring metadata snapshots,

@@ -16,9 +16,9 @@ use std::sync::Arc;

 use clap::Parser;
 use common_error::ext::BoxedError;
-use common_meta::kv_backend::KvBackendRef;
 use common_meta::kv_backend::chroot::ChrootKvBackend;
 use common_meta::kv_backend::etcd::EtcdStore;
+use common_meta::kv_backend::KvBackendRef;
 use meta_srv::bootstrap::create_etcd_client_with_tls;
 use meta_srv::metasrv::BackendImpl;
 use servers::tls::{TlsMode, TlsOption};
@@ -83,6 +83,20 @@ pub(crate) struct StoreConfig {
 }

 impl StoreConfig {
+    pub fn tls_config(&self) -> Option<TlsOption> {
+        if self.backend_tls_mode != TlsMode::Disable {
+            Some(TlsOption {
+                mode: self.backend_tls_mode.clone(),
+                cert_path: self.backend_tls_cert_path.clone(),
+                key_path: self.backend_tls_key_path.clone(),
+                ca_cert_path: self.backend_tls_ca_cert_path.clone(),
+                watch: self.backend_tls_watch,
+            })
+        } else {
+            None
+        }
+    }
+
     /// Builds a [`KvBackendRef`] from the store configuration.
     pub async fn build(&self) -> Result<KvBackendRef, BoxedError> {
         let max_txn_ops = self.max_txn_ops;
@@ -92,17 +106,7 @@ impl StoreConfig {
         } else {
             let kvbackend = match self.backend {
                 BackendImpl::EtcdStore => {
-                    let tls_config = if self.backend_tls_mode != TlsMode::Disable {
-                        Some(TlsOption {
-                            mode: self.backend_tls_mode.clone(),
-                            cert_path: self.backend_tls_cert_path.clone(),
-                            key_path: self.backend_tls_key_path.clone(),
-                            ca_cert_path: self.backend_tls_ca_cert_path.clone(),
-                            watch: self.backend_tls_watch,
-                        })
-                    } else {
-                        None
-                    };
+                    let tls_config = self.tls_config();
                     let etcd_client = create_etcd_client_with_tls(store_addrs, tls_config.as_ref())
                         .await
                         .map_err(BoxedError::new)?;
@@ -111,7 +115,8 @@ impl StoreConfig {
                 #[cfg(feature = "pg_kvbackend")]
                 BackendImpl::PostgresStore => {
                     let table_name = &self.meta_table_name;
-                    let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs, None)
+                    let tls_config = self.tls_config();
+                    let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs, tls_config)
                         .await
                         .map_err(BoxedError::new)?;
                     let schema_name = self.meta_schema_name.as_deref();

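The `StoreConfig` hunks above are the one real refactor in this file: the inline TLS-option construction moves into a `tls_config()` helper, the etcd branch calls it, and the Postgres branch, which previously passed `None` to `create_postgres_pool`, now receives the same options, so both backends honor the `backend_tls_*` flags. A minimal self-contained sketch of the factoring (simplified stand-ins; the real `TlsOption`/`TlsMode` live in the `servers` crate):

```rust
#[derive(Clone, PartialEq)]
enum TlsMode { Disable, Require }

#[derive(Clone)]
struct TlsOption { mode: TlsMode, cert_path: String }

struct StoreConfig { backend_tls_mode: TlsMode, backend_tls_cert_path: String }

impl StoreConfig {
    // One place to build the options instead of duplicating the block per backend.
    fn tls_config(&self) -> Option<TlsOption> {
        (self.backend_tls_mode != TlsMode::Disable).then(|| TlsOption {
            mode: self.backend_tls_mode.clone(),
            cert_path: self.backend_tls_cert_path.clone(),
        })
    }
}

fn main() {
    let cfg = StoreConfig {
        backend_tls_mode: TlsMode::Require,
        backend_tls_cert_path: "/etc/tls/cert.pem".into(),
    };
    // Both backend branches can now consume the same helper:
    let etcd_tls = cfg.tls_config(); // previously built inline for etcd only
    let pg_tls = cfg.tls_config();   // previously hard-coded to None for Postgres
    assert!(etcd_tls.is_some() && pg_tls.is_some());
}
```

`bool::then` here is just a compact equivalent of the `if/else` in the diff; the observable behavior is the same.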
@@ -18,9 +18,9 @@ mod table;
 use clap::Subcommand;
 use common_error::ext::BoxedError;

-use crate::Tool;
 use crate::metadata::control::del::key::DelKeyCommand;
 use crate::metadata::control::del::table::DelTableCommand;
+use crate::Tool;

 /// The prefix of the tombstone keys.
 pub(crate) const CLI_TOMBSTONE_PREFIX: &str = "__cli_tombstone/";

@@ -19,9 +19,9 @@ use common_meta::key::tombstone::TombstoneManager;
 use common_meta::kv_backend::KvBackendRef;
 use common_meta::rpc::store::RangeRequest;

-use crate::Tool;
 use crate::metadata::common::StoreConfig;
 use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
+use crate::Tool;

 /// Delete key-value pairs logically from the metadata store.
 #[derive(Debug, Default, Parser)]
@@ -102,8 +102,8 @@ mod tests {
     use common_meta::kv_backend::{KvBackend, KvBackendRef};
     use common_meta::rpc::store::RangeRequest;

-    use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
     use crate::metadata::control::del::key::KeyDeleter;
+    use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
     use crate::metadata::control::test_utils::put_key;

     #[tokio::test]

@@ -18,16 +18,16 @@ use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_catalog::format_full_table_name;
 use common_error::ext::BoxedError;
 use common_meta::ddl::utils::get_region_wal_options;
-use common_meta::key::TableMetadataManager;
 use common_meta::key::table_name::TableNameManager;
+use common_meta::key::TableMetadataManager;
 use common_meta::kv_backend::KvBackendRef;
 use store_api::storage::TableId;

-use crate::Tool;
 use crate::error::{InvalidArgumentsSnafu, TableNotFoundSnafu};
 use crate::metadata::common::StoreConfig;
 use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
 use crate::metadata::control::utils::get_table_id_by_name;
+use crate::Tool;

 /// Delete table metadata logically from the metadata store.
 #[derive(Debug, Default, Parser)]
@@ -183,15 +183,15 @@ mod tests {

     use common_error::ext::ErrorExt;
     use common_error::status_code::StatusCode;
-    use common_meta::key::TableMetadataManager;
     use common_meta::key::table_route::TableRouteValue;
+    use common_meta::key::TableMetadataManager;
     use common_meta::kv_backend::chroot::ChrootKvBackend;
     use common_meta::kv_backend::memory::MemoryKvBackend;
     use common_meta::kv_backend::{KvBackend, KvBackendRef};
     use common_meta::rpc::store::RangeRequest;

-    use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
     use crate::metadata::control::del::table::TableMetadataDeleter;
+    use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
     use crate::metadata::control::test_utils::prepare_physical_table_metadata;

     #[tokio::test]

@@ -19,18 +19,18 @@ use clap::{Parser, Subcommand};
 use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_catalog::format_full_table_name;
 use common_error::ext::BoxedError;
-use common_meta::key::TableMetadataManager;
 use common_meta::key::table_info::TableInfoKey;
 use common_meta::key::table_route::TableRouteKey;
+use common_meta::key::TableMetadataManager;
 use common_meta::kv_backend::KvBackendRef;
-use common_meta::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream};
+use common_meta::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
 use common_meta::rpc::store::RangeRequest;
 use futures::TryStreamExt;

-use crate::Tool;
 use crate::error::InvalidArgumentsSnafu;
 use crate::metadata::common::StoreConfig;
 use crate::metadata::control::utils::{decode_key_value, get_table_id_by_name, json_fromatter};
+use crate::Tool;

 /// Getting metadata from metadata store.
 #[derive(Subcommand)]

@@ -31,18 +31,18 @@ use common_meta::key::TableMetadataManager;
 use common_meta::kv_backend::KvBackendRef;
 use common_meta::node_manager::NodeManagerRef;
 use common_meta::peer::Peer;
-use common_meta::rpc::router::{RegionRoute, find_leaders};
+use common_meta::rpc::router::{find_leaders, RegionRoute};
 use common_telemetry::{error, info, warn};
 use futures::TryStreamExt;
-use snafu::{ResultExt, ensure};
+use snafu::{ensure, ResultExt};
 use store_api::storage::TableId;

-use crate::Tool;
 use crate::error::{
     InvalidArgumentsSnafu, Result, SendRequestToDatanodeSnafu, TableMetadataSnafu, UnexpectedSnafu,
 };
 use crate::metadata::common::StoreConfig;
 use crate::metadata::utils::{FullTableMetadata, IteratorInput, TableMetadataIterator};
+use crate::Tool;

 /// Repair metadata of logical tables.
 #[derive(Debug, Default, Parser)]
@@ -301,10 +301,7 @@ impl RepairTool {
         warn!(
             "Sending alter table requests to datanodes for table: {} failed for the datanodes: {:?}",
             full_table_name,
-            failed_peers
-                .iter()
-                .map(|(peer, _)| peer.id)
-                .collect::<Vec<_>>()
+            failed_peers.iter().map(|(peer, _)| peer.id).collect::<Vec<_>>()
         );

         let create_table_expr =
@@ -323,7 +320,8 @@ impl RepairTool {
         }
         info!(
             "Region not found for table: {}, datanode: {}, trying to create the logical table on that datanode",
-            full_table_name, peer.id
+            full_table_name,
+            peer.id
         );

         // If the alter table request fails for any datanode, we attempt to create the table on that datanode

@@ -13,11 +13,11 @@
 // limitations under the License.

 use client::api::v1::alter_table_expr::Kind;
-use client::api::v1::region::{AlterRequests, RegionRequest, RegionRequestHeader, region_request};
+use client::api::v1::region::{region_request, AlterRequests, RegionRequest, RegionRequestHeader};
 use client::api::v1::{AddColumn, AddColumns, AlterTableExpr};
 use common_meta::ddl::alter_logical_tables::make_alter_region_request;
 use common_meta::peer::Peer;
-use common_meta::rpc::router::{RegionRoute, find_leader_regions};
+use common_meta::rpc::router::{find_leader_regions, RegionRoute};
 use operator::expr_helper::column_schemas_to_defs;
 use snafu::ResultExt;
 use store_api::storage::{RegionId, TableId};

@@ -14,12 +14,12 @@

 use std::collections::HashMap;

+use client::api::v1::region::{region_request, CreateRequests, RegionRequest, RegionRequestHeader};
 use client::api::v1::CreateTableExpr;
-use client::api::v1::region::{CreateRequests, RegionRequest, RegionRequestHeader, region_request};
 use common_meta::ddl::create_logical_tables::create_region_request_builder;
 use common_meta::ddl::utils::region_storage_path;
 use common_meta::peer::Peer;
-use common_meta::rpc::router::{RegionRoute, find_leader_regions};
+use common_meta::rpc::router::{find_leader_regions, RegionRoute};
 use operator::expr_helper::column_schemas_to_defs;
 use snafu::ResultExt;
 use store_api::storage::{RegionId, TableId};

@@ -19,13 +19,13 @@ use clap::{Parser, Subcommand};
 use common_base::secrets::{ExposeSecret, SecretString};
 use common_error::ext::BoxedError;
 use common_meta::snapshot::MetadataSnapshotManager;
-use object_store::ObjectStore;
 use object_store::services::{Fs, S3};
+use object_store::ObjectStore;
 use snafu::{OptionExt, ResultExt};

-use crate::Tool;
 use crate::error::{InvalidFilePathSnafu, OpenDalSnafu, S3ConfigNotSetSnafu};
 use crate::metadata::common::StoreConfig;
+use crate::Tool;

 /// Subcommand for metadata snapshot operations, including saving snapshots, restoring from snapshots, and viewing snapshot information.
 #[derive(Subcommand)]
@@ -258,13 +258,11 @@ impl Tool for MetaRestoreTool {
             Ok(())
         } else if !self.force {
             common_telemetry::warn!(
                 "The target source is not clean, if you want to restore the metadata snapshot forcefully, please use --force option."
             );
             Ok(())
         } else {
-            common_telemetry::info!(
-                "The target source is not clean, We will restore the metadata snapshot with --force."
-            );
+            common_telemetry::info!("The target source is not clean, We will restore the metadata snapshot with --force.");
             self.inner
                 .restore(&self.source_file)
                 .await

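The restore hunk above is again only message reflow, but the surrounding context lines encode a guard worth spelling out: restore proceeds when the target store is clean, warns and does nothing when it is dirty, and overwrites only when `--force` was passed. A minimal sketch of that gate (hypothetical function names, not the tool's real API):

```rust
fn restore(target_clean: bool, force: bool) -> Result<(), String> {
    if target_clean {
        Ok(()) // clean target: restore unconditionally
    } else if !force {
        // dirty target without --force: warn and do nothing
        eprintln!("target not clean; pass --force to restore anyway");
        Ok(())
    } else {
        Ok(()) // dirty target with --force: proceed and overwrite
    }
}

fn main() {
    assert!(restore(true, false).is_ok());
    assert!(restore(false, false).is_ok()); // no-op with a warning
    assert!(restore(false, true).is_ok());  // forced overwrite
}
```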
@@ -17,9 +17,9 @@ use std::collections::VecDeque;
 use async_stream::try_stream;
 use common_catalog::consts::METRIC_ENGINE;
 use common_catalog::format_full_table_name;
-use common_meta::key::TableMetadataManager;
 use common_meta::key::table_name::TableNameKey;
 use common_meta::key::table_route::TableRouteValue;
+use common_meta::key::TableMetadataManager;
 use common_meta::kv_backend::KvBackendRef;
 use futures::Stream;
 use snafu::{OptionExt, ResultExt};

@@ -14,11 +14,11 @@

 use std::sync::Arc;

-use api::v1::HealthCheckRequest;
 use api::v1::flow::flow_client::FlowClient as PbFlowClient;
 use api::v1::health_check_client::HealthCheckClient;
 use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
 use api::v1::region::region_client::RegionClient as PbRegionClient;
+use api::v1::HealthCheckRequest;
 use arrow_flight::flight_service_client::FlightServiceClient;
 use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};
 use parking_lot::RwLock;
@@ -27,7 +27,7 @@ use tonic::codec::CompressionEncoding;
 use tonic::transport::Channel;

 use crate::load_balance::{LoadBalance, Loadbalancer};
-use crate::{Result, error};
+use crate::{error, Result};

 pub struct FlightClient {
     addr: String,

@@ -21,9 +21,9 @@ use common_meta::node_manager::{DatanodeManager, DatanodeRef, FlownodeManager, F
|
|||||||
use common_meta::peer::Peer;
|
use common_meta::peer::Peer;
|
||||||
use moka::future::{Cache, CacheBuilder};
|
use moka::future::{Cache, CacheBuilder};
|
||||||
|
|
||||||
use crate::Client;
|
|
||||||
use crate::flow::FlowRequester;
|
use crate::flow::FlowRequester;
|
||||||
use crate::region::RegionRequester;
|
use crate::region::RegionRequester;
|
||||||
|
use crate::Client;
|
||||||
|
|
||||||
pub struct NodeClients {
|
pub struct NodeClients {
|
||||||
channel_manager: ChannelManager,
|
channel_manager: ChannelManager,
|
||||||
|
|||||||
@@ -27,8 +27,8 @@ use api::v1::{
|
|||||||
};
|
};
|
||||||
use arrow_flight::{FlightData, Ticket};
|
use arrow_flight::{FlightData, Ticket};
|
||||||
use async_stream::stream;
|
use async_stream::stream;
|
||||||
use base64::Engine;
|
|
||||||
use base64::prelude::BASE64_STANDARD;
|
use base64::prelude::BASE64_STANDARD;
|
||||||
|
use base64::Engine;
|
||||||
use common_catalog::build_db_string;
|
use common_catalog::build_db_string;
|
||||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||||
use common_error::ext::BoxedError;
|
use common_error::ext::BoxedError;
|
||||||
@@ -42,7 +42,7 @@ use common_telemetry::{error, warn};
|
|||||||
use futures::future;
|
use futures::future;
|
||||||
use futures_util::{Stream, StreamExt, TryStreamExt};
|
use futures_util::{Stream, StreamExt, TryStreamExt};
|
||||||
use prost::Message;
|
use prost::Message;
|
||||||
use snafu::{OptionExt, ResultExt, ensure};
|
use snafu::{ensure, OptionExt, ResultExt};
|
||||||
use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue, MetadataMap, MetadataValue};
|
use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue, MetadataMap, MetadataValue};
|
||||||
use tonic::transport::Channel;
|
use tonic::transport::Channel;
|
||||||
|
|
||||||
@@ -50,7 +50,7 @@ use crate::error::{
|
|||||||
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu,
|
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu,
|
||||||
InvalidTonicMetadataValueSnafu,
|
InvalidTonicMetadataValueSnafu,
|
||||||
};
|
};
|
||||||
use crate::{Client, Result, error, from_grpc_response};
|
use crate::{error, from_grpc_response, Client, Result};
|
||||||
|
|
||||||
type FlightDataStream = Pin<Box<dyn Stream<Item = FlightData> + Send>>;
|
type FlightDataStream = Pin<Box<dyn Stream<Item = FlightData> + Send>>;
|
||||||
|
|
||||||
@@ -379,10 +379,11 @@ impl Database {
|
|||||||
tonic_code,
|
tonic_code,
|
||||||
e
|
e
|
||||||
);
|
);
|
||||||
Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
|
let error = Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
|
||||||
addr: client.addr().to_string(),
|
addr: client.addr().to_string(),
|
||||||
tonic_code,
|
tonic_code,
|
||||||
})
|
});
|
||||||
|
error
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
let flight_data_stream = response.into_inner();
|
let flight_data_stream = response.into_inner();
|
||||||
|
|||||||
@@ -18,9 +18,9 @@ use common_error::define_from_tonic_status;
|
|||||||
use common_error::ext::{BoxedError, ErrorExt};
|
use common_error::ext::{BoxedError, ErrorExt};
|
||||||
use common_error::status_code::StatusCode;
|
use common_error::status_code::StatusCode;
|
||||||
use common_macro::stack_trace_debug;
|
use common_macro::stack_trace_debug;
|
||||||
use snafu::{Location, Snafu, location};
|
use snafu::{location, Location, Snafu};
|
||||||
use tonic::Code;
|
|
||||||
use tonic::metadata::errors::InvalidMetadataValue;
|
use tonic::metadata::errors::InvalidMetadataValue;
|
||||||
|
use tonic::Code;
|
||||||
|
|
||||||
#[derive(Snafu)]
|
#[derive(Snafu)]
|
||||||
#[snafu(visibility(pub))]
|
#[snafu(visibility(pub))]
|
||||||
|
|||||||
@@ -12,14 +12,14 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
use api::v1::flow::{DirtyWindowRequests, FlowRequest, FlowResponse};
|
use api::v1::flow::{DirtyWindowRequest, DirtyWindowRequests, FlowRequest, FlowResponse};
|
||||||
use api::v1::region::InsertRequests;
|
use api::v1::region::InsertRequests;
|
||||||
use common_error::ext::BoxedError;
|
use common_error::ext::BoxedError;
|
||||||
use common_meta::node_manager::Flownode;
|
use common_meta::node_manager::Flownode;
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
|
|
||||||
use crate::Client;
|
|
||||||
use crate::error::{FlowServerSnafu, Result};
|
use crate::error::{FlowServerSnafu, Result};
|
||||||
|
use crate::Client;
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct FlowRequester {
|
pub struct FlowRequester {
|
||||||
@@ -47,7 +47,7 @@ impl Flownode for FlowRequester {
|
|||||||
|
|
||||||
async fn handle_mark_window_dirty(
|
async fn handle_mark_window_dirty(
|
||||||
&self,
|
&self,
|
||||||
req: DirtyWindowRequests,
|
req: DirtyWindowRequest,
|
||||||
) -> common_meta::error::Result<FlowResponse> {
|
) -> common_meta::error::Result<FlowResponse> {
|
||||||
self.handle_mark_window_dirty(req)
|
self.handle_mark_window_dirty(req)
|
||||||
.await
|
.await
|
||||||
@@ -102,10 +102,12 @@ impl FlowRequester {
|
|||||||
Ok(response)
|
Ok(response)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_mark_window_dirty(&self, req: DirtyWindowRequests) -> Result<FlowResponse> {
|
async fn handle_mark_window_dirty(&self, req: DirtyWindowRequest) -> Result<FlowResponse> {
|
||||||
let (addr, mut client) = self.client.raw_flow_client()?;
|
let (addr, mut client) = self.client.raw_flow_client()?;
|
||||||
let response = client
|
let response = client
|
||||||
.handle_mark_dirty_time_window(req)
|
.handle_mark_dirty_time_window(DirtyWindowRequests {
|
||||||
|
requests: vec![req],
|
||||||
|
})
|
||||||
.await
|
.await
|
||||||
.or_else(|e| {
|
.or_else(|e| {
|
||||||
let code = e.code();
|
let code = e.code();
|
||||||
|
|||||||
@@ -15,8 +15,8 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use api::region::RegionResponse;
|
use api::region::RegionResponse;
|
||||||
use api::v1::ResponseHeader;
|
|
||||||
use api::v1::region::RegionRequest;
|
use api::v1::region::RegionRequest;
|
||||||
|
use api::v1::ResponseHeader;
|
||||||
use arc_swap::ArcSwapOption;
|
use arc_swap::ArcSwapOption;
|
||||||
use arrow_flight::Ticket;
|
use arrow_flight::Ticket;
|
||||||
use async_stream::stream;
|
use async_stream::stream;
|
||||||
@@ -33,7 +33,7 @@ use common_telemetry::error;
|
|||||||
use common_telemetry::tracing_context::TracingContext;
|
use common_telemetry::tracing_context::TracingContext;
|
||||||
use prost::Message;
|
use prost::Message;
|
||||||
use query::query_engine::DefaultSerializer;
|
use query::query_engine::DefaultSerializer;
|
||||||
use snafu::{OptionExt, ResultExt, location};
|
use snafu::{location, OptionExt, ResultExt};
|
||||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||||
use tokio_stream::StreamExt;
|
use tokio_stream::StreamExt;
|
||||||
|
|
||||||
@@ -41,7 +41,7 @@ use crate::error::{
|
|||||||
self, ConvertFlightDataSnafu, FlightGetSnafu, IllegalDatabaseResponseSnafu,
|
self, ConvertFlightDataSnafu, FlightGetSnafu, IllegalDatabaseResponseSnafu,
|
||||||
IllegalFlightMessagesSnafu, MissingFieldSnafu, Result, ServerSnafu,
|
IllegalFlightMessagesSnafu, MissingFieldSnafu, Result, ServerSnafu,
|
||||||
};
|
};
|
||||||
use crate::{Client, Error, metrics};
|
use crate::{metrics, Client, Error};
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct RegionRequester {
|
pub struct RegionRequester {
|
||||||
@@ -115,10 +115,11 @@ impl RegionRequester {
|
|||||||
flight_client.addr(),
|
flight_client.addr(),
|
||||||
tonic_code
|
tonic_code
|
||||||
);
|
);
|
||||||
Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
|
let error = Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
|
||||||
addr: flight_client.addr().to_string(),
|
addr: flight_client.addr().to_string(),
|
||||||
tonic_code,
|
tonic_code,
|
||||||
})
|
});
|
||||||
|
error
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
let flight_data_stream = response.into_inner();
|
let flight_data_stream = response.into_inner();
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ use clap::{Parser, Subcommand};
|
|||||||
use cmd::datanode::builder::InstanceBuilder;
|
use cmd::datanode::builder::InstanceBuilder;
|
||||||
use cmd::error::{InitTlsProviderSnafu, Result};
|
use cmd::error::{InitTlsProviderSnafu, Result};
|
||||||
use cmd::options::GlobalOptions;
|
use cmd::options::GlobalOptions;
|
||||||
use cmd::{App, cli, datanode, flownode, frontend, metasrv, standalone};
|
use cmd::{cli, datanode, flownode, frontend, metasrv, standalone, App};
|
||||||
use common_base::Plugins;
|
use common_base::Plugins;
|
||||||
use common_version::{verbose_version, version};
|
use common_version::{verbose_version, version};
|
||||||
use servers::install_ring_crypto_provider;
|
use servers::install_ring_crypto_provider;
|
||||||
@@ -143,10 +143,8 @@ async fn start(cli: Command) -> Result<()> {
 }

 fn setup_human_panic() {
-    human_panic::setup_panic!(
-        human_panic::Metadata::new("GreptimeDB", version())
-            .homepage("https://github.com/GreptimeTeam/greptimedb/discussions")
-    );
+    human_panic::setup_panic!(human_panic::Metadata::new("GreptimeDB", version())
+        .homepage("https://github.com/GreptimeTeam/greptimedb/discussions"));

     common_telemetry::set_panic_hook();
 }
@@ -20,7 +20,7 @@ use snafu::ResultExt;
 use tracing_appender::non_blocking::WorkerGuard;

 use crate::options::GlobalOptions;
-use crate::{App, Result, error};
+use crate::{error, App, Result};
 pub const APP_NAME: &str = "greptime-cli";
 use async_trait::async_trait;

@@ -109,7 +109,7 @@ mod tests {

     use crate::error::Result as CmdResult;
     use crate::options::GlobalOptions;
-    use crate::{App, cli, standalone};
+    use crate::{cli, standalone, App};

     #[tokio::test(flavor = "multi_thread")]
     async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
@@ -20,20 +20,20 @@ use std::time::Duration;
 use async_trait::async_trait;
 use clap::Parser;
 use common_config::Configurable;
-use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
+use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
 use common_telemetry::{info, warn};
 use common_wal::config::DatanodeWalConfig;
 use datanode::datanode::Datanode;
 use meta_client::MetaClientOptions;
-use snafu::{ResultExt, ensure};
+use snafu::{ensure, ResultExt};
 use tracing_appender::non_blocking::WorkerGuard;

-use crate::App;
 use crate::datanode::builder::InstanceBuilder;
 use crate::error::{
     LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu,
 };
 use crate::options::{GlobalOptions, GreptimeOptions};
+use crate::App;

 pub const APP_NAME: &str = "greptime-datanode";

@@ -187,39 +187,29 @@ impl StartCommand {
         if let Some(addr) = &self.rpc_bind_addr {
             opts.grpc.bind_addr.clone_from(addr);
         } else if let Some(addr) = &opts.rpc_addr {
-            warn!(
-                "Use the deprecated attribute `DatanodeOptions.rpc_addr`, please use `grpc.addr` instead."
-            );
+            warn!("Use the deprecated attribute `DatanodeOptions.rpc_addr`, please use `grpc.addr` instead.");
             opts.grpc.bind_addr.clone_from(addr);
         }

         if let Some(server_addr) = &self.rpc_server_addr {
             opts.grpc.server_addr.clone_from(server_addr);
         } else if let Some(server_addr) = &opts.rpc_hostname {
-            warn!(
-                "Use the deprecated attribute `DatanodeOptions.rpc_hostname`, please use `grpc.hostname` instead."
-            );
+            warn!("Use the deprecated attribute `DatanodeOptions.rpc_hostname`, please use `grpc.hostname` instead.");
             opts.grpc.server_addr.clone_from(server_addr);
         }

         if let Some(runtime_size) = opts.rpc_runtime_size {
-            warn!(
-                "Use the deprecated attribute `DatanodeOptions.rpc_runtime_size`, please use `grpc.runtime_size` instead."
-            );
+            warn!("Use the deprecated attribute `DatanodeOptions.rpc_runtime_size`, please use `grpc.runtime_size` instead.");
             opts.grpc.runtime_size = runtime_size;
         }

         if let Some(max_recv_message_size) = opts.rpc_max_recv_message_size {
-            warn!(
-                "Use the deprecated attribute `DatanodeOptions.rpc_max_recv_message_size`, please use `grpc.max_recv_message_size` instead."
-            );
+            warn!("Use the deprecated attribute `DatanodeOptions.rpc_max_recv_message_size`, please use `grpc.max_recv_message_size` instead.");
             opts.grpc.max_recv_message_size = max_recv_message_size;
         }

         if let Some(max_send_message_size) = opts.rpc_max_send_message_size {
-            warn!(
-                "Use the deprecated attribute `DatanodeOptions.rpc_max_send_message_size`, please use `grpc.max_send_message_size` instead."
-            );
+            warn!("Use the deprecated attribute `DatanodeOptions.rpc_max_send_message_size`, please use `grpc.max_send_message_size` instead.");
             opts.grpc.max_send_message_size = max_send_message_size;
         }

@@ -440,24 +430,20 @@ mod tests {

     #[test]
     fn test_try_from_cmd() {
-        assert!(
-            (StartCommand {
-                metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
-                ..Default::default()
-            })
-            .load_options(&GlobalOptions::default())
-            .is_err()
-        );
+        assert!((StartCommand {
+            metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
+            ..Default::default()
+        })
+        .load_options(&GlobalOptions::default())
+        .is_err());

         // Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value
-        assert!(
-            (StartCommand {
-                node_id: Some(42),
-                ..Default::default()
-            })
-            .load_options(&GlobalOptions::default())
-            .is_ok()
-        );
+        assert!((StartCommand {
+            node_id: Some(42),
+            ..Default::default()
+        })
+        .load_options(&GlobalOptions::default())
+        .is_ok());
     }

     #[test]
@@ -26,7 +26,7 @@ use meta_client::MetaClientType;
 use snafu::{OptionExt, ResultExt};
 use tracing_appender::non_blocking::WorkerGuard;

-use crate::datanode::{APP_NAME, DatanodeOptions, Instance};
+use crate::datanode::{DatanodeOptions, Instance, APP_NAME};
 use crate::error::{MetaClientInitSnafu, MissingConfigSnafu, Result, StartDatanodeSnafu};
 use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};

@@ -25,20 +25,20 @@ use common_base::Plugins;
 use common_config::{Configurable, DEFAULT_DATA_HOME};
 use common_grpc::channel_manager::ChannelConfig;
 use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
-use common_meta::heartbeat::handler::HandlerGroupExecutor;
 use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
 use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
-use common_meta::key::TableMetadataManager;
+use common_meta::heartbeat::handler::HandlerGroupExecutor;
 use common_meta::key::flow::FlowMetadataManager;
+use common_meta::key::TableMetadataManager;
 use common_telemetry::info;
-use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
+use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
 use common_version::{short_version, verbose_version};
 use flow::{
-    FlownodeBuilder, FlownodeInstance, FlownodeServiceBuilder, FrontendClient, FrontendInvoker,
-    get_flow_auth_options,
+    get_flow_auth_options, FlownodeBuilder, FlownodeInstance, FlownodeServiceBuilder,
+    FrontendClient, FrontendInvoker,
 };
 use meta_client::{MetaClientOptions, MetaClientType};
-use snafu::{OptionExt, ResultExt, ensure};
+use snafu::{ensure, OptionExt, ResultExt};
 use tracing_appender::non_blocking::WorkerGuard;

 use crate::error::{
@@ -46,7 +46,7 @@ use crate::error::{
     MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
 };
 use crate::options::{GlobalOptions, GreptimeOptions};
-use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
+use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};

 pub const APP_NAME: &str = "greptime-flownode";

@@ -341,18 +341,8 @@ impl StartCommand {
                 .build(),
         );

-        // flownode's frontend to datanode need not timeout.
-        // Some queries are expected to take long time.
-        let channel_config = ChannelConfig {
-            timeout: None,
-            ..Default::default()
-        };
-        let client = Arc::new(NodeClients::new(channel_config));
-
-        let information_extension = Arc::new(DistributedInformationExtension::new(
-            meta_client.clone(),
-            client.clone(),
-        ));
+        let information_extension =
+            Arc::new(DistributedInformationExtension::new(meta_client.clone()));
         let catalog_manager = KvBackendCatalogManagerBuilder::new(
             information_extension,
             cached_meta_backend.clone(),
@@ -408,6 +398,14 @@ impl StartCommand {
         flownode.setup_services(services);
         let flownode = flownode;

+        // flownode's frontend to datanode need not timeout.
+        // Some queries are expected to take long time.
+        let channel_config = ChannelConfig {
+            timeout: None,
+            ..Default::default()
+        };
+        let client = Arc::new(NodeClients::new(channel_config));
+
         let invoker = FrontendInvoker::build_from(
             flownode.flow_engine().streaming_engine(),
             catalog_manager.clone(),
@@ -27,11 +27,11 @@ use common_base::Plugins;
 use common_config::{Configurable, DEFAULT_DATA_HOME};
 use common_grpc::channel_manager::ChannelConfig;
 use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
-use common_meta::heartbeat::handler::HandlerGroupExecutor;
 use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
 use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
+use common_meta::heartbeat::handler::HandlerGroupExecutor;
 use common_telemetry::info;
-use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
+use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
 use common_time::timezone::set_default_timezone;
 use common_version::{short_version, verbose_version};
 use frontend::frontend::Frontend;
@@ -48,7 +48,7 @@ use tracing_appender::non_blocking::WorkerGuard;

 use crate::error::{self, Result};
 use crate::options::{GlobalOptions, GreptimeOptions};
-use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
+use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};

 type FrontendOptions = GreptimeOptions<frontend::frontend::FrontendOptions>;

@@ -378,24 +378,8 @@ impl StartCommand {
                 .build(),
         );

-        // frontend to datanode need not timeout.
-        // Some queries are expected to take long time.
-        let mut channel_config = ChannelConfig {
-            timeout: None,
-            tcp_nodelay: opts.datanode.client.tcp_nodelay,
-            connect_timeout: Some(opts.datanode.client.connect_timeout),
-            ..Default::default()
-        };
-        if opts.grpc.flight_compression.transport_compression() {
-            channel_config.accept_compression = true;
-            channel_config.send_compression = true;
-        }
-        let client = Arc::new(NodeClients::new(channel_config));
-
-        let information_extension = Arc::new(DistributedInformationExtension::new(
-            meta_client.clone(),
-            client.clone(),
-        ));
+        let information_extension =
+            Arc::new(DistributedInformationExtension::new(meta_client.clone()));

         let process_manager = Arc::new(ProcessManager::new(
             addrs::resolve_addr(&opts.grpc.bind_addr, Some(&opts.grpc.server_addr)),
@@ -429,12 +413,26 @@ impl StartCommand {
         );
         let heartbeat_task = Some(heartbeat_task);

+        // frontend to datanode need not timeout.
+        // Some queries are expected to take long time.
+        let mut channel_config = ChannelConfig {
+            timeout: None,
+            tcp_nodelay: opts.datanode.client.tcp_nodelay,
+            connect_timeout: Some(opts.datanode.client.connect_timeout),
+            ..Default::default()
+        };
+        if opts.grpc.flight_compression.transport_compression() {
+            channel_config.accept_compression = true;
+            channel_config.send_compression = true;
+        }
+        let client = NodeClients::new(channel_config);
+
         let instance = FrontendBuilder::new(
             opts.clone(),
             cached_meta_backend.clone(),
             layered_cache_registry.clone(),
             catalog_manager,
-            client,
+            Arc::new(client),
             meta_client,
             process_manager,
         )
@@ -46,7 +46,7 @@ lazy_static::lazy_static! {
 /// wait for the close signal, for unix platform it's SIGINT or SIGTERM
 #[cfg(unix)]
 async fn start_wait_for_close_signal() -> std::io::Result<()> {
-    use tokio::signal::unix::{SignalKind, signal};
+    use tokio::signal::unix::{signal, SignalKind};
     let mut sigint = signal(SignalKind::interrupt())?;
     let mut sigterm = signal(SignalKind::terminate())?;

@@ -93,13 +93,13 @@ pub trait App: Send {

         self.start().await?;

-        if self.wait_signal()
-            && let Err(e) = start_wait_for_close_signal().await
-        {
-            error!(e; "Failed to listen for close signal");
-            // It's unusual to fail to listen for close signal, maybe there's something unexpected in
-            // the underlying system. So we stop the app instead of running nonetheless to let people
-            // investigate the issue.
+        if self.wait_signal() {
+            if let Err(e) = start_wait_for_close_signal().await {
+                error!(e; "Failed to listen for close signal");
+                // It's unusual to fail to listen for close signal, maybe there's something unexpected in
+                // the underlying system. So we stop the app instead of running nonetheless to let people
+                // investigate the issue.
+            }
         }

         self.stop().await?;
@@ -21,7 +21,7 @@ use clap::Parser;
 use common_base::Plugins;
 use common_config::Configurable;
 use common_telemetry::info;
-use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
+use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
 use common_version::{short_version, verbose_version};
 use meta_srv::bootstrap::MetasrvInstance;
 use meta_srv::metasrv::BackendImpl;
@@ -30,7 +30,7 @@ use tracing_appender::non_blocking::WorkerGuard;

 use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu};
 use crate::options::{GlobalOptions, GreptimeOptions};
-use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
+use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};

 type MetasrvOptions = GreptimeOptions<meta_srv::metasrv::MetasrvOptions>;

@@ -19,16 +19,15 @@ use std::{fs, path};

 use async_trait::async_trait;
 use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
-use catalog::information_schema::{DatanodeInspectRequest, InformationExtension};
+use catalog::information_schema::InformationExtension;
 use catalog::kvbackend::KvBackendCatalogManagerBuilder;
 use catalog::process_manager::ProcessManager;
 use clap::Parser;
-use client::SendableRecordBatchStream;
 use client::api::v1::meta::RegionRole;
-use common_base::Plugins;
 use common_base::readable_size::ReadableSize;
+use common_base::Plugins;
 use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
-use common_config::{Configurable, KvBackendConfig, metadata_store_dir};
+use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
 use common_error::ext::BoxedError;
 use common_meta::cache::LayeredCacheRegistryBuilder;
 use common_meta::cluster::{NodeInfo, NodeStatus};
@@ -37,8 +36,8 @@ use common_meta::ddl::flow_meta::FlowMetadataAllocator;
 use common_meta::ddl::table_meta::TableMetadataAllocator;
 use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl};
 use common_meta::ddl_manager::DdlManager;
-use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::flow::flow_state::FlowStat;
+use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::KvBackendRef;
 use common_meta::peer::Peer;
@@ -46,13 +45,12 @@ use common_meta::procedure_executor::LocalProcedureExecutor;
 use common_meta::region_keeper::MemoryRegionKeeper;
 use common_meta::region_registry::LeaderRegionRegistry;
 use common_meta::sequence::SequenceBuilder;
-use common_meta::wal_options_allocator::{WalOptionsAllocatorRef, build_wal_options_allocator};
+use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef};
 use common_options::memory::MemoryOptions;
 use common_procedure::{ProcedureInfo, ProcedureManagerRef};
-use common_query::request::QueryRequest;
 use common_telemetry::info;
 use common_telemetry::logging::{
-    DEFAULT_LOGGING_DIR, LoggingOptions, SlowQueryOptions, TracingOptions,
+    LoggingOptions, SlowQueryOptions, TracingOptions, DEFAULT_LOGGING_DIR,
 };
 use common_time::timezone::set_default_timezone;
 use common_version::{short_version, verbose_version};
@@ -82,13 +80,12 @@ use servers::grpc::GrpcOptions;
 use servers::http::HttpOptions;
 use servers::tls::{TlsMode, TlsOption};
 use snafu::ResultExt;
-use store_api::storage::RegionId;
 use tokio::sync::RwLock;
 use tracing_appender::non_blocking::WorkerGuard;

 use crate::error::{Result, StartFlownodeSnafu};
 use crate::options::{GlobalOptions, GreptimeOptions};
-use crate::{App, create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile};
+use crate::{create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile, App};

 pub const APP_NAME: &str = "greptime-standalone";

@@ -218,7 +215,6 @@ impl Into<FrontendOptions> for StandaloneOptions {
 }

 impl StandaloneOptions {
-    /// Returns the `FrontendOptions` for the standalone instance.
     pub fn frontend_options(&self) -> FrontendOptions {
         let cloned_opts = self.clone();
         FrontendOptions {
@@ -242,7 +238,6 @@ impl StandaloneOptions {
         }
     }

-    /// Returns the `DatanodeOptions` for the standalone instance.
     pub fn datanode_options(&self) -> DatanodeOptions {
         let cloned_opts = self.clone();
         DatanodeOptions {
@@ -258,17 +253,6 @@ impl StandaloneOptions {
             ..Default::default()
         }
     }
-
-    /// Sanitize the `StandaloneOptions` to ensure the config is valid.
-    pub fn sanitize(&mut self) {
-        if self.storage.is_object_storage() {
-            self.storage
-                .store
-                .cache_config_mut()
-                .unwrap()
-                .sanitize(&self.storage.data_home);
-        }
-    }
 }

 pub struct Instance {
@@ -409,7 +393,6 @@ impl StartCommand {
             .context(error::LoadLayeredConfigSnafu)?;

         self.merge_with_cli_options(global_options, &mut opts.component)?;
-        opts.component.sanitize();

         Ok(opts)
     }
@@ -804,10 +787,6 @@ impl InformationExtension for StandaloneInformationExtension {
             // Use `self.start_time_ms` instead.
             // It's not precise but enough.
             start_time_ms: self.start_time_ms,
-            cpus: common_config::utils::get_cpus() as u32,
-            memory_bytes: common_config::utils::get_sys_total_memory()
-                .unwrap_or_default()
-                .as_bytes(),
         };
         Ok(vec![node_info])
     }
@@ -855,7 +834,7 @@ impl InformationExtension for StandaloneInformationExtension {
                     region_manifest: region_stat.manifest.into(),
                     data_topic_latest_entry_id: region_stat.data_topic_latest_entry_id,
                     metadata_topic_latest_entry_id: region_stat.metadata_topic_latest_entry_id,
-                    written_bytes: region_stat.written_bytes,
+                    write_bytes: 0,
                 }
             })
             .collect::<Vec<_>>();
@@ -873,25 +852,6 @@ impl InformationExtension for StandaloneInformationExtension {
             .await,
         ))
     }
-
-    async fn inspect_datanode(
-        &self,
-        request: DatanodeInspectRequest,
-    ) -> std::result::Result<SendableRecordBatchStream, Self::Error> {
-        let req = QueryRequest {
-            plan: request
-                .build_plan()
-                .context(catalog::error::DatafusionSnafu)?,
-            region_id: RegionId::default(),
-            header: None,
-        };
-
-        self.region_server
-            .handle_read(req)
-            .await
-            .map_err(BoxedError::new)
-            .context(catalog::error::InternalSnafu)
-    }
 }

 #[cfg(test)]
@@ -1161,22 +1121,4 @@ mod tests {
         assert_eq!(options.logging, default_options.logging);
         assert_eq!(options.region_engine, default_options.region_engine);
     }
-
-    #[test]
-    fn test_cache_config() {
-        let toml_str = r#"
-            [storage]
-            data_home = "test_data_home"
-            type = "S3"
-            [storage.cache_config]
-            enable_read_cache = true
-        "#;
-        let mut opts: StandaloneOptions = toml::from_str(toml_str).unwrap();
-        opts.sanitize();
-        assert!(opts.storage.store.cache_config().unwrap().enable_read_cache);
-        assert_eq!(
-            opts.storage.store.cache_config().unwrap().cache_path,
-            "test_data_home"
-        );
-    }
 }
@@ -18,9 +18,9 @@ use cmd::options::GreptimeOptions;
 use cmd::standalone::StandaloneOptions;
 use common_config::{Configurable, DEFAULT_DATA_HOME};
 use common_options::datanode::{ClientOptions, DatanodeClientOptions};
-use common_telemetry::logging::{DEFAULT_LOGGING_DIR, DEFAULT_OTLP_HTTP_ENDPOINT, LoggingOptions};
-use common_wal::config::DatanodeWalConfig;
+use common_telemetry::logging::{LoggingOptions, DEFAULT_LOGGING_DIR, DEFAULT_OTLP_HTTP_ENDPOINT};
 use common_wal::config::raft_engine::RaftEngineConfig;
+use common_wal::config::DatanodeWalConfig;
 use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
 use file_engine::config::EngineConfig as FileEngineConfig;
 use flow::FlownodeOptions;
@@ -19,8 +19,8 @@
 use std::fmt::{Debug, Display, Formatter};
 use std::future::Future;
 use std::pin::Pin;
-use std::sync::Arc;
 use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Arc;
 use std::task::{Context, Poll};

 use futures::task::AtomicWaker;
@@ -17,8 +17,8 @@ use std::io;
 use std::ops::Range;
 use std::path::Path;
 use std::pin::Pin;
-use std::sync::Arc;
 use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::Arc;
 use std::task::{Context, Poll};

 use async_trait::async_trait;
@@ -7,7 +7,7 @@ use std::ops::{Div, Mul};
 use std::str::FromStr;

 use serde::de::{Unexpected, Visitor};
-use serde::{Deserialize, Deserializer, Serialize, Serializer, de};
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};

 const UNIT: u64 = 1;

@@ -34,7 +34,7 @@
 use std::fmt::Debug;
 use std::{any, fmt};

-use serde::{Deserialize, Serialize, de, ser};
+use serde::{de, ser, Deserialize, Serialize};
 use zeroize::{Zeroize, ZeroizeOnDrop};

 /// Wrapper type for strings that contains secrets. See also [SecretBox].
@@ -13,8 +13,8 @@
 // limitations under the License.

 use config::{Environment, File, FileFormat};
-use serde::Serialize;
 use serde::de::DeserializeOwned;
+use serde::Serialize;
 use snafu::ResultExt;

 use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu, TomlFormatSnafu};
@@ -39,24 +39,6 @@ pub fn get_sys_total_memory() -> Option<ReadableSize> {
     }
 }
-
-/// `ResourceSpec` holds the static resource specifications of a node,
-/// such as CPU cores and memory capacity. These values are fixed
-/// at startup and do not change dynamically during runtime.
-#[derive(Debug, Clone, Copy)]
-pub struct ResourceSpec {
-    pub cpus: usize,
-    pub memory: Option<ReadableSize>,
-}
-
-impl Default for ResourceSpec {
-    fn default() -> Self {
-        Self {
-            cpus: get_cpus(),
-            memory: get_sys_total_memory(),
-        }
-    }
-}

 #[cfg(test)]
 mod tests {
     use super::*;
@@ -45,11 +45,11 @@ pub trait ArrowWriterCloser {
 }

 impl<
-    T: AsyncWrite + Send + Unpin,
-    U: DfRecordBatchEncoder + ArrowWriterCloser,
-    F: Fn(String) -> Fut,
-    Fut: Future<Output = Result<T>>,
-> LazyBufferedWriter<T, U, F>
+        T: AsyncWrite + Send + Unpin,
+        U: DfRecordBatchEncoder + ArrowWriterCloser,
+        F: Fn(String) -> Fut,
+        Fut: Future<Output = Result<T>>,
+    > LazyBufferedWriter<T, U, F>
 {
     /// Closes `LazyBufferedWriter` and optionally flushes all data to underlying storage
     /// if any row's been written.
@@ -67,11 +67,11 @@ impl<
 }

 impl<
-    T: AsyncWrite + Send + Unpin,
-    U: DfRecordBatchEncoder,
-    F: Fn(String) -> Fut,
-    Fut: Future<Output = Result<T>>,
-> LazyBufferedWriter<T, U, F>
+        T: AsyncWrite + Send + Unpin,
+        U: DfRecordBatchEncoder,
+        F: Fn(String) -> Fut,
+        Fut: Future<Output = Result<T>>,
+    > LazyBufferedWriter<T, U, F>
 {
     /// Closes the writer and flushes the buffer data.
     pub async fn close_inner_writer(&mut self) -> Result<()> {
@@ -42,11 +42,11 @@ use self::csv::CsvFormat;
|
|||||||
use self::json::JsonFormat;
|
use self::json::JsonFormat;
|
||||||
use self::orc::OrcFormat;
|
use self::orc::OrcFormat;
|
||||||
use self::parquet::ParquetFormat;
|
use self::parquet::ParquetFormat;
|
||||||
use crate::DEFAULT_WRITE_BUFFER_SIZE;
|
|
||||||
use crate::buffered_writer::{DfRecordBatchEncoder, LazyBufferedWriter};
|
use crate::buffered_writer::{DfRecordBatchEncoder, LazyBufferedWriter};
|
||||||
use crate::compression::CompressionType;
|
use crate::compression::CompressionType;
|
||||||
use crate::error::{self, Result};
|
use crate::error::{self, Result};
|
||||||
use crate::share_buffer::SharedBuffer;
|
use crate::share_buffer::SharedBuffer;
|
||||||
|
use crate::DEFAULT_WRITE_BUFFER_SIZE;
|
||||||
|
|
||||||
pub const FORMAT_COMPRESSION_TYPE: &str = "compression_type";
|
pub const FORMAT_COMPRESSION_TYPE: &str = "compression_type";
|
||||||
pub const FORMAT_DELIMITER: &str = "delimiter";
|
pub const FORMAT_DELIMITER: &str = "delimiter";
|
||||||
@@ -158,10 +158,10 @@ pub fn open_with_decoder<T: ArrowDecoder, F: Fn() -> DataFusionResult<T>>(
|
|||||||
|
|
||||||
let stream = futures::stream::poll_fn(move |cx| {
|
let stream = futures::stream::poll_fn(move |cx| {
|
||||||
loop {
|
loop {
|
||||||
if buffered.is_empty()
|
if buffered.is_empty() {
|
||||||
&& let Some(result) = futures::ready!(upstream.poll_next_unpin(cx))
|
if let Some(result) = futures::ready!(upstream.poll_next_unpin(cx)) {
|
||||||
{
|
buffered = result?;
|
||||||
buffered = result?;
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
let decoded = decoder.decode(buffered.as_ref())?;
|
let decoded = decoder.decode(buffered.as_ref())?;
|
||||||
|
|||||||
@@ -30,7 +30,7 @@ use tokio_util::io::SyncIoBridge;
|
|||||||
use crate::buffered_writer::DfRecordBatchEncoder;
|
use crate::buffered_writer::DfRecordBatchEncoder;
|
||||||
use crate::compression::CompressionType;
|
use crate::compression::CompressionType;
|
||||||
use crate::error::{self, Result};
|
use crate::error::{self, Result};
|
||||||
use crate::file_format::{self, FileFormat, stream_to_file};
|
use crate::file_format::{self, stream_to_file, FileFormat};
|
||||||
use crate::share_buffer::SharedBuffer;
|
use crate::share_buffer::SharedBuffer;
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||||
@@ -160,8 +160,8 @@ mod tests {
|
|||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::file_format::{
|
use crate::file_format::{
|
||||||
FORMAT_COMPRESSION_TYPE, FORMAT_DELIMITER, FORMAT_HAS_HEADER,
|
FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_DELIMITER, FORMAT_HAS_HEADER,
|
||||||
FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat,
|
FORMAT_SCHEMA_INFER_MAX_RECORD,
|
||||||
};
|
};
|
||||||
use crate::test_util::{format_schema, test_store};
|
use crate::test_util::{format_schema, test_store};
|
||||||
|
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ use std::io::BufReader;
|
|||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
use arrow::json;
|
use arrow::json;
|
||||||
use arrow::json::reader::{ValueIter, infer_json_schema_from_iterator};
|
use arrow::json::reader::{infer_json_schema_from_iterator, ValueIter};
|
||||||
use arrow::json::writer::LineDelimited;
|
use arrow::json::writer::LineDelimited;
|
||||||
use arrow::record_batch::RecordBatch;
|
use arrow::record_batch::RecordBatch;
|
||||||
use arrow_schema::Schema;
|
use arrow_schema::Schema;
|
||||||
@@ -32,7 +32,7 @@ use tokio_util::io::SyncIoBridge;
|
|||||||
use crate::buffered_writer::DfRecordBatchEncoder;
|
use crate::buffered_writer::DfRecordBatchEncoder;
|
||||||
use crate::compression::CompressionType;
|
use crate::compression::CompressionType;
|
||||||
use crate::error::{self, Result};
|
use crate::error::{self, Result};
|
||||||
use crate::file_format::{self, FileFormat, stream_to_file};
|
use crate::file_format::{self, stream_to_file, FileFormat};
|
||||||
use crate::share_buffer::SharedBuffer;
|
use crate::share_buffer::SharedBuffer;
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||||
@@ -133,7 +133,7 @@ mod tests {
|
|||||||
use common_test_util::find_workspace_path;
|
use common_test_util::find_workspace_path;
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::file_format::{FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat};
|
use crate::file_format::{FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD};
|
||||||
use crate::test_util::{format_schema, test_store};
|
use crate::test_util::{format_schema, test_store};
|
||||||
|
|
||||||
fn test_data_root() -> String {
|
fn test_data_root() -> String {
|
||||||
|
|||||||
@@ -15,8 +15,8 @@
|
|||||||
use arrow_schema::Schema;
|
use arrow_schema::Schema;
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
use futures::FutureExt;
|
|
||||||
use futures::future::BoxFuture;
|
use futures::future::BoxFuture;
|
||||||
|
use futures::FutureExt;
|
||||||
use object_store::ObjectStore;
|
use object_store::ObjectStore;
|
||||||
use orc_rust::arrow_reader::ArrowReaderBuilder;
|
use orc_rust::arrow_reader::ArrowReaderBuilder;
|
||||||
use orc_rust::async_arrow_reader::ArrowStreamReader;
|
use orc_rust::async_arrow_reader::ArrowStreamReader;
|
||||||
|
|||||||
@@ -21,29 +21,29 @@ use async_trait::async_trait;
|
|||||||
use datafusion::datasource::physical_plan::{FileMeta, ParquetFileReaderFactory};
|
use datafusion::datasource::physical_plan::{FileMeta, ParquetFileReaderFactory};
|
||||||
use datafusion::error::Result as DatafusionResult;
|
use datafusion::error::Result as DatafusionResult;
|
||||||
use datafusion::parquet::arrow::async_reader::AsyncFileReader;
|
use datafusion::parquet::arrow::async_reader::AsyncFileReader;
|
||||||
use datafusion::parquet::arrow::{ArrowWriter, parquet_to_arrow_schema};
|
use datafusion::parquet::arrow::{parquet_to_arrow_schema, ArrowWriter};
|
||||||
use datafusion::parquet::errors::{ParquetError, Result as ParquetResult};
|
use datafusion::parquet::errors::{ParquetError, Result as ParquetResult};
|
||||||
use datafusion::parquet::file::metadata::ParquetMetaData;
|
use datafusion::parquet::file::metadata::ParquetMetaData;
|
||||||
use datafusion::parquet::format::FileMetaData;
|
use datafusion::parquet::format::FileMetaData;
|
||||||
use datafusion::physical_plan::SendableRecordBatchStream;
|
|
||||||
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
|
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
|
||||||
|
use datafusion::physical_plan::SendableRecordBatchStream;
|
||||||
use datatypes::schema::SchemaRef;
|
use datatypes::schema::SchemaRef;
|
||||||
use futures::StreamExt;
|
|
||||||
use futures::future::BoxFuture;
|
use futures::future::BoxFuture;
|
||||||
|
use futures::StreamExt;
|
||||||
use object_store::{FuturesAsyncReader, ObjectStore};
|
use object_store::{FuturesAsyncReader, ObjectStore};
|
||||||
use parquet::arrow::AsyncArrowWriter;
|
|
||||||
use parquet::arrow::arrow_reader::ArrowReaderOptions;
|
use parquet::arrow::arrow_reader::ArrowReaderOptions;
|
||||||
|
use parquet::arrow::AsyncArrowWriter;
|
||||||
use parquet::basic::{Compression, Encoding, ZstdLevel};
|
use parquet::basic::{Compression, Encoding, ZstdLevel};
|
||||||
use parquet::file::properties::{WriterProperties, WriterPropertiesBuilder};
|
use parquet::file::properties::{WriterProperties, WriterPropertiesBuilder};
|
||||||
use parquet::schema::types::ColumnPath;
|
use parquet::schema::types::ColumnPath;
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
use tokio_util::compat::{Compat, FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt};
|
use tokio_util::compat::{Compat, FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt};
|
||||||
|
|
||||||
use crate::DEFAULT_WRITE_BUFFER_SIZE;
|
|
||||||
use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder};
|
use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder};
|
||||||
use crate::error::{self, Result, WriteObjectSnafu, WriteParquetSnafu};
|
use crate::error::{self, Result, WriteObjectSnafu, WriteParquetSnafu};
|
||||||
use crate::file_format::FileFormat;
|
use crate::file_format::FileFormat;
|
||||||
use crate::share_buffer::SharedBuffer;
|
use crate::share_buffer::SharedBuffer;
|
||||||
|
use crate::DEFAULT_WRITE_BUFFER_SIZE;
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||||
pub struct ParquetFormat {}
|
pub struct ParquetFormat {}
|
||||||
@@ -196,7 +196,10 @@ pub async fn stream_to_parquet(
     concurrency: usize,
 ) -> Result<usize> {
     let write_props = column_wise_config(
-        WriterProperties::builder().set_compression(Compression::ZSTD(ZstdLevel::default())),
+        WriterProperties::builder()
+            .set_compression(Compression::ZSTD(ZstdLevel::default()))
+            .set_statistics_truncate_length(None)
+            .set_column_index_truncate_length(None),
         schema,
     )
     .build();
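
The only behavioral change in this hunk is the two new writer settings. A minimal standalone sketch of the resulting properties, using the parquet crate API that the imports above already pull in (the wrapper function name is illustrative, and the surrounding column_wise_config call is GreptimeDB-internal and omitted):

use parquet::basic::{Compression, ZstdLevel};
use parquet::file::properties::WriterProperties;

// Build writer properties with ZSTD compression and untruncated statistics.
fn writer_props() -> WriterProperties {
    WriterProperties::builder()
        .set_compression(Compression::ZSTD(ZstdLevel::default()))
        // Passing None disables truncation, so min/max statistics and the
        // column index store full values instead of shortened prefixes.
        .set_statistics_truncate_length(None)
        .set_column_index_truncate_length(None)
        .build()
}

Keeping full min/max values gives readers tighter bounds when pruning row groups and pages, at the cost of slightly larger file metadata.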
@@ -24,8 +24,8 @@ use datafusion::datasource::physical_plan::{
 };
 use datafusion::datasource::source::DataSourceExec;
 use datafusion::execution::context::TaskContext;
-use datafusion::physical_plan::ExecutionPlan;
 use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
+use datafusion::physical_plan::ExecutionPlan;
 use datafusion::prelude::SessionContext;
 use datafusion_orc::OrcSource;
 use futures::StreamExt;
@@ -204,15 +204,15 @@ async fn test_orc_opener() {
             config: scan_config(schema.clone(), None, path, file_source.clone()),
             file_source: file_source.clone(),
             expected: vec![
                 "+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+",
                 "| double_a | a | b | str_direct | d | e | f | int_short_repeated | int_neg_short_repeated | int_delta | int_neg_delta | int_direct | int_neg_direct | bigint_direct | bigint_neg_direct | bigint_other | utf8_increase | utf8_decrease | timestamp_simple | date_simple |",
                 "+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+",
                 "| 1.0 | 1.0 | true | a | a | ddd | aaaaa | 5 | -5 | 1 | 5 | 1 | -1 | 1 | -1 | 5 | a | eeeee | 2023-04-01T20:15:30.002 | 2023-04-01 |",
                 "| 2.0 | 2.0 | false | cccccc | bb | cc | bbbbb | 5 | -5 | 2 | 4 | 6 | -6 | 6 | -6 | -5 | bb | dddd | 2021-08-22T07:26:44.525777 | 2023-03-01 |",
                 "| 3.0 | | | | | | | | | | | | | | | 1 | ccc | ccc | 2023-01-01T00:00:00 | 2023-01-01 |",
                 "| 4.0 | 4.0 | true | ddd | ccc | bb | ccccc | 5 | -5 | 4 | 2 | 3 | -3 | 3 | -3 | 5 | dddd | bb | 2023-02-01T00:00:00 | 2023-02-01 |",
                 "| 5.0 | 5.0 | false | ee | ddd | a | ddddd | 5 | -5 | 5 | 1 | 2 | -2 | 2 | -2 | 5 | eeeee | a | 2023-03-01T00:00:00 | 2023-03-01 |",
                 "+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+",
             ],
         },
         Test {
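
The expected vector above is a pretty-printed-table fixture. A rough sketch of how such a fixture is typically compared against real output, assuming the arrow crate's pretty_format_batches helper (the actual assertion helper in this test suite may differ):

use arrow::record_batch::RecordBatch;
use arrow::util::pretty::pretty_format_batches;

// Hypothetical helper: render batches as an ASCII table and compare line by line.
fn assert_batches_eq(expected: &[&str], batches: &[RecordBatch]) {
    let formatted = pretty_format_batches(batches)
        .expect("formatting record batches should not fail")
        .to_string();
    let actual: Vec<&str> = formatted.trim().lines().collect();
    assert_eq!(expected, actual.as_slice());
}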
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use futures::{TryStreamExt, future};
+use futures::{future, TryStreamExt};
 use object_store::{Entry, ObjectStore};
 use regex::Regex;
 use snafu::ResultExt;
@@ -12,9 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use object_store::ObjectStore;
 use object_store::services::Fs;
 use object_store::util::DefaultLoggingInterceptor;
+use object_store::ObjectStore;
 use snafu::ResultExt;
 
 use crate::error::{BuildBackendSnafu, Result};
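
Judging from the imports, this module wires up a local-filesystem backend. A sketch of the usual pattern, under the assumption that GreptimeDB's object_store crate re-exports OpenDAL's Operator-style API (builder method signatures vary across OpenDAL versions, and the function name and root handling here are illustrative):

use object_store::services::Fs;
use object_store::ObjectStore;

// Build an ObjectStore over the local filesystem rooted at `root`.
fn build_fs_object_store(root: &str) -> object_store::Result<ObjectStore> {
    let builder = Fs::default().root(root);
    Ok(ObjectStore::new(builder)?.finish())
}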
Some files were not shown because too many files have changed in this diff.