Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-24 23:19:57 +00:00

Compare commits: avoid-quer...flow_fix_f

13 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | d815bdf770 |  |
|  | 0b324563ac |  |
|  | ffecb6882e |  |
|  | c185242997 |  |
|  | 9fda415b0d |  |
|  | 5eaf9816b9 |  |
|  | 6684f8dce3 |  |
|  | e8660a6f7e |  |
|  | 6659f3cc62 |  |
|  | d218d65361 |  |
|  | 8f40ba42c1 |  |
|  | d1ce436442 |  |
|  | e580ba63ec |  |
.github/workflows/develop.yml (vendored, 9 changes)
@@ -212,14 +212,7 @@ jobs:
          path: .
      - name: Unzip binaries
        run: tar -xvf ./bins.tar.gz
      - name: Build Fuzz Test
        shell: bash
        run: |
          cd tests-fuzz &
          cargo install cargo-gc-bin &
          cargo gc &
          cd ..
      - name: Run Fuzz Test
      - name: Fuzz Test
        uses: ./.github/actions/fuzz-test
        env:
          CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
Cargo.lock (generated, 158 changes)
@@ -214,7 +214,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
|
||||
|
||||
[[package]]
|
||||
name = "api"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-decimal",
|
||||
@@ -703,7 +703,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "auth"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -877,7 +877,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "benchmarks"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow",
|
||||
@@ -904,6 +904,7 @@ dependencies = [
|
||||
"rskafka",
|
||||
"serde",
|
||||
"store-api",
|
||||
"tests-integration",
|
||||
"tokio",
|
||||
"toml 0.8.12",
|
||||
"uuid",
|
||||
@@ -1219,7 +1220,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cache"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"catalog",
|
||||
"common-error",
|
||||
@@ -1227,7 +1228,6 @@ dependencies = [
|
||||
"common-meta",
|
||||
"moka",
|
||||
"snafu 0.8.2",
|
||||
"substrait 0.8.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1254,15 +1254,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
||||
|
||||
[[package]]
|
||||
name = "catalog"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow",
|
||||
"arrow-schema",
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
"bytes",
|
||||
"cache",
|
||||
"catalog",
|
||||
"chrono",
|
||||
"common-catalog",
|
||||
@@ -1536,7 +1534,7 @@ checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce"
|
||||
|
||||
[[package]]
|
||||
name = "client"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -1565,7 +1563,7 @@ dependencies = [
|
||||
"serde_json",
|
||||
"snafu 0.8.2",
|
||||
"substrait 0.17.1",
|
||||
"substrait 0.8.1",
|
||||
"substrait 0.8.0",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
"tonic 0.11.0",
|
||||
@@ -1595,7 +1593,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cmd"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"auth",
|
||||
@@ -1651,7 +1649,7 @@ dependencies = [
|
||||
"session",
|
||||
"snafu 0.8.2",
|
||||
"store-api",
|
||||
"substrait 0.8.1",
|
||||
"substrait 0.8.0",
|
||||
"table",
|
||||
"temp-env",
|
||||
"tempfile",
|
||||
@@ -1696,7 +1694,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
|
||||
|
||||
[[package]]
|
||||
name = "common-base"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"anymap",
|
||||
"bitvec",
|
||||
@@ -1712,7 +1710,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-catalog"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"common-error",
|
||||
@@ -1723,7 +1721,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-config"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
@@ -1746,7 +1744,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-datasource"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"arrow-schema",
|
||||
@@ -1778,7 +1776,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-decimal"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"bigdecimal",
|
||||
"common-error",
|
||||
@@ -1791,7 +1789,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-error"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"snafu 0.8.2",
|
||||
"strum 0.25.0",
|
||||
@@ -1799,7 +1797,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-frontend"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -1814,7 +1812,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-function"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -1847,7 +1845,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-greptimedb-telemetry"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-runtime",
|
||||
@@ -1864,7 +1862,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -1890,7 +1888,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc-expr"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"common-base",
|
||||
@@ -1907,7 +1905,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-macro"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-query",
|
||||
@@ -1922,7 +1920,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-mem-prof"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"common-macro",
|
||||
@@ -1935,7 +1933,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-meta"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"anymap2",
|
||||
"api",
|
||||
@@ -1988,11 +1986,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-plugins"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -2017,7 +2015,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure-test"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-procedure",
|
||||
@@ -2025,11 +2023,10 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-query"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
"bytes",
|
||||
"common-base",
|
||||
"common-error",
|
||||
"common-macro",
|
||||
@@ -2049,7 +2046,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-recordbatch"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-error",
|
||||
@@ -2068,7 +2065,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-runtime"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-error",
|
||||
@@ -2088,7 +2085,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-telemetry"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"atty",
|
||||
"backtrace",
|
||||
@@ -2115,7 +2112,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-test-util"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"client",
|
||||
"common-query",
|
||||
@@ -2127,7 +2124,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-time"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"chrono",
|
||||
@@ -2143,7 +2140,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-version"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"build-data",
|
||||
"schemars",
|
||||
@@ -2152,7 +2149,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-wal"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
@@ -3152,7 +3149,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datanode"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -3201,6 +3198,7 @@ dependencies = [
|
||||
"session",
|
||||
"snafu 0.8.2",
|
||||
"store-api",
|
||||
"substrait 0.8.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"toml 0.8.12",
|
||||
@@ -3209,7 +3207,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datatypes"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"arrow-array",
|
||||
@@ -3700,7 +3698,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "file-engine"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -3802,7 +3800,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "flow"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -3843,7 +3841,7 @@ dependencies = [
|
||||
"snafu 0.8.2",
|
||||
"store-api",
|
||||
"strum 0.25.0",
|
||||
"substrait 0.8.1",
|
||||
"substrait 0.8.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"tonic 0.11.0",
|
||||
@@ -3881,7 +3879,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
|
||||
|
||||
[[package]]
|
||||
name = "frontend"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -4197,7 +4195,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
|
||||
[[package]]
|
||||
name = "greptime-proto"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=ae26136accd82fbdf8be540cd502f2e94951077e#ae26136accd82fbdf8be540cd502f2e94951077e"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=902f75fdd170c572e90b1f640161d90995f20218#902f75fdd170c572e90b1f640161d90995f20218"
|
||||
dependencies = [
|
||||
"prost 0.12.4",
|
||||
"serde",
|
||||
@@ -4687,7 +4685,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "index"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"asynchronous-codec",
|
||||
@@ -5254,7 +5252,7 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
|
||||
|
||||
[[package]]
|
||||
name = "log-store"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -5551,7 +5549,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-client"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -5577,7 +5575,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-srv"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -5653,7 +5651,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "metric-engine"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -5735,7 +5733,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mito2"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -6363,7 +6361,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "object-store"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
@@ -6604,7 +6602,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "operator"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -6650,7 +6648,7 @@ dependencies = [
|
||||
"sql",
|
||||
"sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
|
||||
"store-api",
|
||||
"substrait 0.8.1",
|
||||
"substrait 0.8.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"tonic 0.11.0",
|
||||
@@ -6894,7 +6892,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "partition"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -7240,7 +7238,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "plugins"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"auth",
|
||||
"common-base",
|
||||
@@ -7518,20 +7516,26 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "promql"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"async-recursion",
|
||||
"async-trait",
|
||||
"bytemuck",
|
||||
"catalog",
|
||||
"common-catalog",
|
||||
"common-error",
|
||||
"common-macro",
|
||||
"common-query",
|
||||
"common-recordbatch",
|
||||
"common-telemetry",
|
||||
"datafusion 37.0.0",
|
||||
"datafusion-expr 37.0.0",
|
||||
"datafusion-functions 37.0.0",
|
||||
"datatypes",
|
||||
"futures",
|
||||
"greptime-proto",
|
||||
"itertools 0.10.5",
|
||||
"lazy_static",
|
||||
"prometheus",
|
||||
"promql-parser",
|
||||
@@ -7539,6 +7543,7 @@ dependencies = [
|
||||
"query",
|
||||
"session",
|
||||
"snafu 0.8.2",
|
||||
"table",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
@@ -7724,7 +7729,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "puffin"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bitflags 2.5.0",
|
||||
@@ -7835,7 +7840,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "query"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -7846,7 +7851,6 @@ dependencies = [
|
||||
"async-recursion",
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
"bytes",
|
||||
"catalog",
|
||||
"chrono",
|
||||
"common-base",
|
||||
@@ -7859,13 +7863,11 @@ dependencies = [
|
||||
"common-plugins",
|
||||
"common-query",
|
||||
"common-recordbatch",
|
||||
"common-runtime",
|
||||
"common-telemetry",
|
||||
"common-time",
|
||||
"datafusion 37.0.0",
|
||||
"datafusion-common 37.0.0",
|
||||
"datafusion-expr 37.0.0",
|
||||
"datafusion-functions 37.0.0",
|
||||
"datafusion-optimizer 37.0.0",
|
||||
"datafusion-physical-expr 37.0.0",
|
||||
"datafusion-sql 37.0.0",
|
||||
@@ -7875,7 +7877,6 @@ dependencies = [
|
||||
"futures-util",
|
||||
"greptime-proto",
|
||||
"humantime",
|
||||
"itertools 0.10.5",
|
||||
"lazy_static",
|
||||
"meter-core",
|
||||
"meter-macros",
|
||||
@@ -7887,7 +7888,6 @@ dependencies = [
|
||||
"prometheus",
|
||||
"promql",
|
||||
"promql-parser",
|
||||
"prost 0.12.4",
|
||||
"rand",
|
||||
"regex",
|
||||
"session",
|
||||
@@ -7897,7 +7897,7 @@ dependencies = [
|
||||
"stats-cli",
|
||||
"store-api",
|
||||
"streaming-stats",
|
||||
"substrait 0.8.1",
|
||||
"substrait 0.8.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -9204,7 +9204,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
|
||||
|
||||
[[package]]
|
||||
name = "script"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -9474,7 +9474,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "servers"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"aide",
|
||||
"api",
|
||||
@@ -9560,6 +9560,7 @@ dependencies = [
|
||||
"strum 0.25.0",
|
||||
"table",
|
||||
"tempfile",
|
||||
"tests-integration",
|
||||
"tikv-jemalloc-ctl",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
@@ -9577,7 +9578,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "session"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -9855,7 +9856,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sql"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"chrono",
|
||||
@@ -9911,7 +9912,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sqlness-runner"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"clap 4.5.4",
|
||||
@@ -9924,6 +9925,7 @@ dependencies = [
|
||||
"serde_json",
|
||||
"sqlness",
|
||||
"tempfile",
|
||||
"tests-integration",
|
||||
"tinytemplate",
|
||||
"tokio",
|
||||
]
|
||||
@@ -10128,7 +10130,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "store-api"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -10296,11 +10298,13 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "substrait"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes",
|
||||
"catalog",
|
||||
"common-error",
|
||||
"common-function",
|
||||
"common-macro",
|
||||
"common-telemetry",
|
||||
"datafusion 37.0.0",
|
||||
@@ -10310,6 +10314,7 @@ dependencies = [
|
||||
"datatypes",
|
||||
"promql",
|
||||
"prost 0.12.4",
|
||||
"session",
|
||||
"snafu 0.8.2",
|
||||
"substrait 0.17.1",
|
||||
"tokio",
|
||||
@@ -10484,9 +10489,8 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "table"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
"chrono",
|
||||
"common-base",
|
||||
@@ -10595,7 +10599,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
|
||||
|
||||
[[package]]
|
||||
name = "tests-fuzz"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"arbitrary",
|
||||
"async-trait",
|
||||
@@ -10628,7 +10632,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tests-integration"
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -10687,7 +10691,7 @@ dependencies = [
|
||||
"sql",
|
||||
"sqlx",
|
||||
"store-api",
|
||||
"substrait 0.8.1",
|
||||
"substrait 0.8.0",
|
||||
"table",
|
||||
"tempfile",
|
||||
"time",
|
||||
|
||||
@@ -64,7 +64,7 @@ members = [
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.8.1"
|
||||
version = "0.8.0"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
@@ -120,7 +120,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
|
||||
fst = "0.4.7"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ae26136accd82fbdf8be540cd502f2e94951077e" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "902f75fdd170c572e90b1f640161d90995f20218" }
|
||||
humantime = "2.1"
|
||||
humantime-serde = "1.1"
|
||||
itertools = "0.10"
|
||||
@@ -172,7 +172,6 @@ tokio-stream = { version = "0.1" }
|
||||
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
||||
toml = "0.8.8"
|
||||
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
|
||||
tower = { version = "0.4" }
|
||||
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
|
||||
zstd = "0.13"
|
||||
|
||||
@@ -233,6 +232,8 @@ sql = { path = "src/sql" }
|
||||
store-api = { path = "src/store-api" }
|
||||
substrait = { path = "src/common/substrait" }
|
||||
table = { path = "src/table" }
|
||||
# TODO some code depends on this
|
||||
tests-integration = { path = "tests-integration" }
|
||||
|
||||
[workspace.dependencies.meter-macros]
|
||||
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
||||
|
||||
@@ -12,7 +12,7 @@ api.workspace = true
|
||||
arrow.workspace = true
|
||||
chrono.workspace = true
|
||||
clap.workspace = true
|
||||
client = { workspace = true, features = ["testing"] }
|
||||
client.workspace = true
|
||||
common-base.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-wal.workspace = true
|
||||
@@ -33,6 +33,8 @@ rand.workspace = true
|
||||
rskafka.workspace = true
|
||||
serde.workspace = true
|
||||
store-api.workspace = true
|
||||
# TODO depend `Database` client
|
||||
tests-integration.workspace = true
|
||||
tokio.workspace = true
|
||||
toml.workspace = true
|
||||
uuid.workspace = true
|
||||
|
||||
@@ -28,7 +28,6 @@ use rand::distributions::{Alphanumeric, DistString, Uniform};
|
||||
use rand::rngs::SmallRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use store_api::logstore::provider::Provider;
|
||||
use store_api::logstore::LogStore;
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
@@ -211,7 +210,7 @@ impl From<Args> for Config {
|
||||
pub struct Region {
|
||||
id: RegionId,
|
||||
schema: Vec<ColumnSchema>,
|
||||
provider: Provider,
|
||||
wal_options: WalOptions,
|
||||
next_sequence: AtomicU64,
|
||||
next_entry_id: AtomicU64,
|
||||
next_timestamp: AtomicI64,
|
||||
@@ -228,14 +227,10 @@ impl Region {
|
||||
num_rows: u32,
|
||||
rng_seed: u64,
|
||||
) -> Self {
|
||||
let provider = match wal_options {
|
||||
WalOptions::RaftEngine => Provider::raft_engine_provider(id.as_u64()),
|
||||
WalOptions::Kafka(opts) => Provider::kafka_provider(opts.topic),
|
||||
};
|
||||
Self {
|
||||
id,
|
||||
schema,
|
||||
provider,
|
||||
wal_options,
|
||||
next_sequence: AtomicU64::new(1),
|
||||
next_entry_id: AtomicU64::new(1),
|
||||
next_timestamp: AtomicI64::new(1655276557000),
|
||||
@@ -263,14 +258,14 @@ impl Region {
|
||||
self.id,
|
||||
self.next_entry_id.fetch_add(1, Ordering::Relaxed),
|
||||
&entry,
|
||||
&self.provider,
|
||||
&self.wal_options,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
/// Replays the region.
|
||||
pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
|
||||
let mut wal_stream = wal.scan(self.id, 0, &self.provider).unwrap();
|
||||
let mut wal_stream = wal.scan(self.id, 0, &self.wal_options).unwrap();
|
||||
while let Some(res) = wal_stream.next().await {
|
||||
let (_, entry) = res.unwrap();
|
||||
metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
|
||||
|
||||
src/cache/Cargo.toml (vendored, 1 change)
@@ -11,4 +11,3 @@ common-macro.workspace = true
|
||||
common-meta.workspace = true
|
||||
moka.workspace = true
|
||||
snafu.workspace = true
|
||||
substrait.workspace = true
|
||||
|
||||
src/cache/src/lib.rs (vendored, 15 changes)
@@ -20,8 +20,7 @@ use std::time::Duration;
|
||||
use catalog::kvbackend::new_table_cache;
|
||||
use common_meta::cache::{
|
||||
new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
|
||||
new_table_route_cache, new_view_info_cache, CacheRegistry, CacheRegistryBuilder,
|
||||
LayeredCacheRegistryBuilder,
|
||||
new_table_route_cache, CacheRegistry, CacheRegistryBuilder, LayeredCacheRegistryBuilder,
|
||||
};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use moka::future::CacheBuilder;
|
||||
@@ -34,7 +33,6 @@ const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
|
||||
const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
|
||||
|
||||
pub const TABLE_INFO_CACHE_NAME: &str = "table_info_cache";
|
||||
pub const VIEW_INFO_CACHE_NAME: &str = "view_info_cache";
|
||||
pub const TABLE_NAME_CACHE_NAME: &str = "table_name_cache";
|
||||
pub const TABLE_CACHE_NAME: &str = "table_cache";
|
||||
pub const TABLE_FLOWNODE_SET_CACHE_NAME: &str = "table_flownode_set_cache";
|
||||
@@ -84,22 +82,11 @@ pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegist
|
||||
cache,
|
||||
kv_backend.clone(),
|
||||
));
|
||||
// Builds the view info cache
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
let view_info_cache = Arc::new(new_view_info_cache(
|
||||
VIEW_INFO_CACHE_NAME.to_string(),
|
||||
cache,
|
||||
kv_backend.clone(),
|
||||
));
|
||||
|
||||
CacheRegistryBuilder::default()
|
||||
.add_cache(table_info_cache)
|
||||
.add_cache(table_name_cache)
|
||||
.add_cache(table_route_cache)
|
||||
.add_cache(view_info_cache)
|
||||
.add_cache(table_flownode_set_cache)
|
||||
.build()
|
||||
}
|
||||
|
||||
@@ -16,7 +16,6 @@ arrow.workspace = true
|
||||
arrow-schema.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait = "0.1"
|
||||
bytes.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-config.workspace = true
|
||||
common-error.workspace = true
|
||||
@@ -49,11 +48,8 @@ table.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
cache.workspace = true
|
||||
catalog = { workspace = true, features = ["testing"] }
|
||||
chrono.workspace = true
|
||||
common-meta = { workspace = true, features = ["testing"] }
|
||||
common-query = { workspace = true, features = ["testing"] }
|
||||
common-test-util.workspace = true
|
||||
log-store.workspace = true
|
||||
object-store.workspace = true
|
||||
|
||||
@@ -19,7 +19,10 @@ use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datafusion::error::DataFusionError;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::{Location, Snafu};
|
||||
use table::metadata::TableId;
|
||||
use tokio::task::JoinError;
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
@@ -62,6 +65,19 @@ pub enum Error {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
#[snafu(display("Failed to open system catalog table"))]
|
||||
OpenSystemCatalog {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create system catalog table"))]
|
||||
CreateSystemCatalog {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create table, table info: {}", table_info))]
|
||||
CreateTable {
|
||||
@@ -78,6 +94,52 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"System catalog table type mismatch, expected: binary, found: {:?}",
|
||||
data_type,
|
||||
))]
|
||||
SystemCatalogTypeMismatch {
|
||||
data_type: ConcreteDataType,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
|
||||
InvalidEntryType {
|
||||
entry_type: Option<u8>,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid system catalog key: {:?}", key))]
|
||||
InvalidKey {
|
||||
key: Option<String>,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Catalog value is not present"))]
|
||||
EmptyValue {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to deserialize value"))]
|
||||
ValueDeserialize {
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table engine not found: {}", engine_name))]
|
||||
TableEngineNotFound {
|
||||
engine_name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
|
||||
CatalogNotFound {
|
||||
catalog_name: String,
|
||||
@@ -107,9 +169,44 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("View info not found: {}", name))]
|
||||
ViewInfoNotFound {
|
||||
name: String,
|
||||
#[snafu(display("Schema {} already exists", schema))]
|
||||
SchemaExists {
|
||||
schema: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Operation {} not implemented yet", operation))]
|
||||
Unimplemented {
|
||||
operation: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Operation {} not supported", op))]
|
||||
NotSupported {
|
||||
op: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to open table {table_id}"))]
|
||||
OpenTable {
|
||||
table_id: TableId,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to open table in parallel"))]
|
||||
ParallelOpenTable {
|
||||
#[snafu(source)]
|
||||
error: JoinError,
|
||||
},
|
||||
|
||||
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
|
||||
TableNotFound {
|
||||
table_info: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
@@ -120,6 +217,13 @@ pub enum Error {
|
||||
#[snafu(display("Failed to find region routes"))]
|
||||
FindRegionRoutes { source: partition::error::Error },
|
||||
|
||||
#[snafu(display("Failed to read system catalog table records"))]
|
||||
ReadSystemCatalog {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create recordbatch"))]
|
||||
CreateRecordBatch {
|
||||
#[snafu(implicit)]
|
||||
@@ -127,6 +231,20 @@ pub enum Error {
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to insert table creation record to system catalog"))]
|
||||
InsertCatalogRecord {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to scan system catalog table"))]
|
||||
SystemCatalogTableScan {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Internal error"))]
|
||||
Internal {
|
||||
#[snafu(implicit)]
|
||||
@@ -140,14 +258,20 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to decode logical plan for view: {}", name))]
|
||||
DecodePlan {
|
||||
name: String,
|
||||
#[snafu(display("Failed to execute system catalog table scan"))]
|
||||
SystemCatalogTableScanExec {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot parse catalog value"))]
|
||||
InvalidCatalogValue {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to perform metasrv operation"))]
|
||||
Metasrv {
|
||||
#[snafu(implicit)]
|
||||
@@ -173,6 +297,20 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table schema mismatch"))]
|
||||
TableSchemaMismatch {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("A generic error has occurred, msg: {}", msg))]
|
||||
Generic {
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table metadata manager error"))]
|
||||
TableMetadataManager {
|
||||
source: common_meta::error::Error,
|
||||
@@ -186,26 +324,6 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to get view info from cache"))]
|
||||
GetViewCache {
|
||||
source: common_meta::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Cache not found: {name}"))]
|
||||
CacheNotFound {
|
||||
name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to cast the catalog manager"))]
|
||||
CastManager {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -213,43 +331,61 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::SchemaNotFound { .. }
|
||||
Error::InvalidKey { .. }
|
||||
| Error::SchemaNotFound { .. }
|
||||
| Error::CatalogNotFound { .. }
|
||||
| Error::FindPartitions { .. }
|
||||
| Error::FindRegionRoutes { .. }
|
||||
| Error::CacheNotFound { .. }
|
||||
| Error::CastManager { .. } => StatusCode::Unexpected,
|
||||
| Error::InvalidEntryType { .. }
|
||||
| Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,
|
||||
Error::TableNotFound { .. } => StatusCode::TableNotFound,
|
||||
|
||||
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
|
||||
Error::SystemCatalog { .. }
|
||||
| Error::EmptyValue { .. }
|
||||
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
Error::Generic { .. }
|
||||
| Error::SystemCatalogTypeMismatch { .. }
|
||||
| Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
|
||||
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::InvalidCatalogValue { source, .. } => source.status_code(),
|
||||
|
||||
Error::CreateRecordBatch { source, .. } => source.status_code(),
|
||||
Error::TableExists { .. } => StatusCode::TableAlreadyExists,
|
||||
Error::TableNotExist { .. } => StatusCode::TableNotFound,
|
||||
Error::SchemaExists { .. } | Error::TableEngineNotFound { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
|
||||
Error::ListCatalogs { source, .. }
|
||||
| Error::ListNodes { source, .. }
|
||||
| Error::ListSchemas { source, .. }
|
||||
| Error::ListTables { source, .. } => source.status_code(),
|
||||
|
||||
Error::CreateTable { source, .. } => source.status_code(),
|
||||
Error::OpenSystemCatalog { source, .. }
|
||||
| Error::CreateSystemCatalog { source, .. }
|
||||
| Error::InsertCatalogRecord { source, .. }
|
||||
| Error::OpenTable { source, .. }
|
||||
| Error::CreateTable { source, .. }
|
||||
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
|
||||
|
||||
Error::Metasrv { source, .. } => source.status_code(),
|
||||
Error::DecodePlan { source, .. } => source.status_code(),
|
||||
Error::SystemCatalogTableScan { source, .. } => source.status_code(),
|
||||
Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
|
||||
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
|
||||
|
||||
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
|
||||
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
|
||||
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
|
||||
Error::TableMetadataManager { source, .. } => source.status_code(),
|
||||
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::GetTableCache { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -281,6 +417,11 @@ mod tests {
|
||||
.status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::Unexpected,
|
||||
InvalidKeySnafu { key: None }.build().status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::StorageUnavailable,
|
||||
Error::SystemCatalog {
|
||||
@@ -289,6 +430,19 @@ mod tests {
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::Internal,
|
||||
Error::SystemCatalogTypeMismatch {
|
||||
data_type: ConcreteDataType::binary_datatype(),
|
||||
location: Location::generate(),
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
assert_eq!(
|
||||
StatusCode::StorageUnavailable,
|
||||
EmptyValueSnafu {}.build().status_code()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -22,13 +22,14 @@ use common_catalog::consts::{
|
||||
};
|
||||
use common_config::Mode;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
|
||||
use common_meta::cache::TableRouteCacheRef;
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::schema_name::SchemaNameKey;
|
||||
use common_meta::key::table_info::TableInfoValue;
|
||||
use common_meta::key::table_name::TableNameKey;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::table_name::TableName;
|
||||
use futures_util::stream::BoxStream;
|
||||
use futures_util::{StreamExt, TryStreamExt};
|
||||
use meta_client::client::MetaClient;
|
||||
@@ -37,12 +38,11 @@ use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
|
||||
use snafu::prelude::*;
|
||||
use table::dist_table::DistTable;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
use table::table_name::TableName;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
|
||||
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
|
||||
GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu,
|
||||
ListTablesSnafu, Result, TableMetadataManagerSnafu,
|
||||
};
|
||||
use crate::information_schema::InformationSchemaProvider;
|
||||
use crate::kvbackend::TableCacheRef;
|
||||
@@ -61,26 +61,25 @@ pub struct KvBackendCatalogManager {
|
||||
table_metadata_manager: TableMetadataManagerRef,
|
||||
/// A sub-CatalogManager that handles system tables
|
||||
system_catalog: SystemCatalog,
|
||||
cache_registry: LayeredCacheRegistryRef,
|
||||
table_cache: TableCacheRef,
|
||||
}
|
||||
|
||||
const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
|
||||
|
||||
impl KvBackendCatalogManager {
|
||||
pub fn new(
|
||||
pub async fn new(
|
||||
mode: Mode,
|
||||
meta_client: Option<Arc<MetaClient>>,
|
||||
backend: KvBackendRef,
|
||||
cache_registry: LayeredCacheRegistryRef,
|
||||
table_cache: TableCacheRef,
|
||||
table_route_cache: TableRouteCacheRef,
|
||||
) -> Arc<Self> {
|
||||
Arc::new_cyclic(|me| Self {
|
||||
mode,
|
||||
meta_client,
|
||||
partition_manager: Arc::new(PartitionRuleManager::new(
|
||||
backend.clone(),
|
||||
cache_registry
|
||||
.get()
|
||||
.expect("Failed to get table_route_cache"),
|
||||
table_route_cache,
|
||||
)),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
|
||||
system_catalog: SystemCatalog {
|
||||
@@ -91,7 +90,7 @@ impl KvBackendCatalogManager {
|
||||
me.clone(),
|
||||
)),
|
||||
},
|
||||
cache_registry,
|
||||
table_cache,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -100,12 +99,6 @@ impl KvBackendCatalogManager {
|
||||
&self.mode
|
||||
}
|
||||
|
||||
pub fn view_info_cache(&self) -> Result<ViewInfoCacheRef> {
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "view_info_cache",
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the `[MetaClient]`.
|
||||
pub fn meta_client(&self) -> Option<Arc<MetaClient>> {
|
||||
self.meta_client.clone()
|
||||
@@ -222,11 +215,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
return Ok(Some(table));
|
||||
}
|
||||
|
||||
let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_cache",
|
||||
})?;
|
||||
|
||||
table_cache
|
||||
self.table_cache
|
||||
.get_by_ref(&TableName {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
|
||||
@@ -17,11 +17,11 @@ use std::sync::Arc;
|
||||
use common_meta::cache::{CacheContainer, Initializer, TableInfoCacheRef, TableNameCacheRef};
|
||||
use common_meta::error::{Result as MetaResult, ValueNotExistSnafu};
|
||||
use common_meta::instruction::CacheIdent;
|
||||
use common_meta::table_name::TableName;
|
||||
use futures::future::BoxFuture;
|
||||
use moka::future::Cache;
|
||||
use snafu::OptionExt;
|
||||
use table::dist_table::DistTable;
|
||||
use table::table_name::TableName;
|
||||
use table::TableRef;
|
||||
|
||||
pub type TableCacheRef = Arc<TableCache>;
|
||||
|
||||
@@ -15,25 +15,15 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use bytes::Bytes;
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_query::logical_plan::SubstraitPlanDecoderRef;
|
||||
use datafusion::common::{ResolvedTableReference, TableReference};
|
||||
use datafusion::datasource::view::ViewTable;
|
||||
use datafusion::datasource::{provider_as_source, TableProvider};
|
||||
use datafusion::datasource::provider_as_source;
|
||||
use datafusion::logical_expr::TableSource;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::metadata::TableType;
|
||||
use snafu::{ensure, OptionExt};
|
||||
use table::table::adapter::DfTableProviderAdapter;
|
||||
mod dummy_catalog;
|
||||
use dummy_catalog::DummyCatalogList;
|
||||
|
||||
use crate::error::{
|
||||
CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, QueryAccessDeniedSnafu,
|
||||
Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
|
||||
};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::error::{QueryAccessDeniedSnafu, Result, TableNotExistSnafu};
|
||||
use crate::CatalogManagerRef;
|
||||
|
||||
pub struct DfTableSourceProvider {
|
||||
@@ -42,7 +32,6 @@ pub struct DfTableSourceProvider {
|
||||
disallow_cross_catalog_query: bool,
|
||||
default_catalog: String,
|
||||
default_schema: String,
|
||||
plan_decoder: SubstraitPlanDecoderRef,
|
||||
}
|
||||
|
||||
impl DfTableSourceProvider {
|
||||
@@ -50,7 +39,6 @@ impl DfTableSourceProvider {
|
||||
catalog_manager: CatalogManagerRef,
|
||||
disallow_cross_catalog_query: bool,
|
||||
query_ctx: &QueryContext,
|
||||
plan_decoder: SubstraitPlanDecoderRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
catalog_manager,
|
||||
@@ -58,7 +46,6 @@ impl DfTableSourceProvider {
|
||||
resolved_tables: HashMap::new(),
|
||||
default_catalog: query_ctx.current_catalog().to_owned(),
|
||||
default_schema: query_ctx.current_schema().to_owned(),
|
||||
plan_decoder,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -107,39 +94,8 @@ impl DfTableSourceProvider {
|
||||
table: format_full_table_name(catalog_name, schema_name, table_name),
|
||||
})?;
|
||||
|
||||
let provider: Arc<dyn TableProvider> = if table.table_info().table_type == TableType::View {
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.as_any()
|
||||
.downcast_ref::<KvBackendCatalogManager>()
|
||||
.context(CastManagerSnafu)?;
|
||||
|
||||
let view_info = catalog_manager
|
||||
.view_info_cache()?
|
||||
.get(table.table_info().ident.table_id)
|
||||
.await
|
||||
.context(GetViewCacheSnafu)?
|
||||
.context(ViewInfoNotFoundSnafu {
|
||||
name: &table.table_info().name,
|
||||
})?;
|
||||
|
||||
// Build the catalog list provider for deserialization.
|
||||
let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
|
||||
let logical_plan = self
|
||||
.plan_decoder
|
||||
.decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
|
||||
.await
|
||||
.context(DecodePlanSnafu {
|
||||
name: &table.table_info().name,
|
||||
})?;
|
||||
|
||||
Arc::new(ViewTable::try_new(logical_plan, None).context(DatafusionSnafu)?)
|
||||
} else {
|
||||
Arc::new(DfTableProviderAdapter::new(table))
|
||||
};
|
||||
|
||||
let source = provider_as_source(provider);
|
||||
|
||||
let provider = DfTableProviderAdapter::new(table);
|
||||
let source = provider_as_source(Arc::new(provider));
|
||||
let _ = self.resolved_tables.insert(resolved_name, source.clone());
|
||||
Ok(source)
|
||||
}
|
||||
@@ -147,7 +103,6 @@ impl DfTableSourceProvider {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_query::test_util::DummyDecoder;
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
@@ -157,12 +112,8 @@ mod tests {
|
||||
fn test_validate_table_ref() {
|
||||
let query_ctx = &QueryContext::with("greptime", "public");
|
||||
|
||||
let table_provider = DfTableSourceProvider::new(
|
||||
MemoryCatalogManager::with_default_setup(),
|
||||
true,
|
||||
query_ctx,
|
||||
DummyDecoder::arc(),
|
||||
);
|
||||
let table_provider =
|
||||
DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);
|
||||
|
||||
let table_ref = TableReference::bare("table_name");
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
@@ -197,99 +148,4 @@ mod tests {
|
||||
let table_ref = TableReference::full("greptime", "greptime_private", "columns");
|
||||
assert!(table_provider.resolve_table_ref(table_ref).is_ok());
|
||||
}
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use common_config::Mode;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
use common_query::error::Result as QueryResult;
|
||||
use common_query::logical_plan::SubstraitPlanDecoder;
|
||||
use datafusion::catalog::CatalogProviderList;
|
||||
use datafusion::logical_expr::builder::LogicalTableSource;
|
||||
use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder};
|
||||
|
||||
struct MockDecoder;
|
||||
impl MockDecoder {
|
||||
pub fn arc() -> Arc<Self> {
|
||||
Arc::new(MockDecoder)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl SubstraitPlanDecoder for MockDecoder {
|
||||
async fn decode(
|
||||
&self,
|
||||
_message: bytes::Bytes,
|
||||
_catalog_list: Arc<dyn CatalogProviderList>,
|
||||
_optimize: bool,
|
||||
) -> QueryResult<LogicalPlan> {
|
||||
Ok(mock_plan())
|
||||
}
|
||||
}
|
||||
|
||||
fn mock_plan() -> LogicalPlan {
|
||||
let schema = Schema::new(vec![
|
||||
Field::new("id", DataType::Int32, true),
|
||||
Field::new("name", DataType::Utf8, true),
|
||||
]);
|
||||
let table_source = LogicalTableSource::new(SchemaRef::new(schema));
|
||||
|
||||
let projection = None;
|
||||
|
||||
let builder =
|
||||
LogicalPlanBuilder::scan("person", Arc::new(table_source), projection).unwrap();
|
||||
|
||||
builder
|
||||
.filter(col("id").gt(lit(500)))
|
||||
.unwrap()
|
||||
.build()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_resolve_view() {
|
||||
let query_ctx = &QueryContext::with("greptime", "public");
|
||||
let backend = Arc::new(MemoryKvBackend::default());
|
||||
let layered_cache_builder = LayeredCacheRegistryBuilder::default()
|
||||
.add_cache_registry(CacheRegistryBuilder::default().build());
|
||||
let fundamental_cache_registry = build_fundamental_cache_registry(backend.clone());
|
||||
let layered_cache_registry = Arc::new(
|
||||
with_default_composite_cache_registry(
|
||||
layered_cache_builder.add_cache_registry(fundamental_cache_registry),
|
||||
)
|
||||
.unwrap()
|
||||
.build(),
|
||||
);
|
||||
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
Mode::Standalone,
|
||||
None,
|
||||
backend.clone(),
|
||||
layered_cache_registry,
|
||||
);
|
||||
let table_metadata_manager = TableMetadataManager::new(backend);
|
||||
let mut view_info = common_meta::key::test_utils::new_test_table_info(1024, vec![]);
|
||||
view_info.table_type = TableType::View;
|
||||
let logical_plan = vec![1, 2, 3];
|
||||
// Create view metadata
|
||||
table_metadata_manager
|
||||
.create_view_metadata(view_info.clone().into(), logical_plan, HashSet::new())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut table_provider =
|
||||
DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc());
|
||||
|
||||
// View not found
|
||||
let table_ref = TableReference::bare("not_exists_view");
|
||||
assert!(table_provider.resolve_table(table_ref).await.is_err());
|
||||
|
||||
let table_ref = TableReference::bare(view_info.name);
|
||||
let source = table_provider.resolve_table(table_ref).await.unwrap();
|
||||
assert_eq!(*source.get_logical_plan().unwrap(), mock_plan());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,129 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Dummy catalog for region server.
|
||||
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::format_full_table_name;
|
||||
use datafusion::catalog::schema::SchemaProvider;
|
||||
use datafusion::catalog::{CatalogProvider, CatalogProviderList};
|
||||
use datafusion::datasource::TableProvider;
|
||||
use snafu::OptionExt;
|
||||
use table::table::adapter::DfTableProviderAdapter;
|
||||
|
||||
use crate::error::TableNotExistSnafu;
|
||||
use crate::CatalogManagerRef;
|
||||
|
||||
/// Delegate the resolving requests to the `[CatalogManager]` unconditionally.
|
||||
#[derive(Clone)]
|
||||
pub struct DummyCatalogList {
|
||||
catalog_manager: CatalogManagerRef,
|
||||
}
|
||||
|
||||
impl DummyCatalogList {
|
||||
/// Creates a new catalog list with the given catalog manager.
|
||||
pub fn new(catalog_manager: CatalogManagerRef) -> Self {
|
||||
Self { catalog_manager }
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogProviderList for DummyCatalogList {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn register_catalog(
|
||||
&self,
|
||||
_name: String,
|
||||
_catalog: Arc<dyn CatalogProvider>,
|
||||
) -> Option<Arc<dyn CatalogProvider>> {
|
||||
None
|
||||
}
|
||||
|
||||
fn catalog_names(&self) -> Vec<String> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
fn catalog(&self, catalog_name: &str) -> Option<Arc<dyn CatalogProvider>> {
|
||||
Some(Arc::new(DummyCatalogProvider {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
catalog_manager: self.catalog_manager.clone(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
/// A dummy catalog provider for [DummyCatalogList].
|
||||
#[derive(Clone)]
|
||||
struct DummyCatalogProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
}
|
||||
|
||||
impl CatalogProvider for DummyCatalogProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema_names(&self) -> Vec<String> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
fn schema(&self, schema_name: &str) -> Option<Arc<dyn SchemaProvider>> {
|
||||
Some(Arc::new(DummySchemaProvider {
|
||||
catalog_name: self.catalog_name.clone(),
|
||||
schema_name: schema_name.to_string(),
|
||||
catalog_manager: self.catalog_manager.clone(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
/// A dummy schema provider for [DummyCatalogList].
|
||||
#[derive(Clone)]
|
||||
struct DummySchemaProvider {
|
||||
catalog_name: String,
|
||||
schema_name: String,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SchemaProvider for DummySchemaProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn table_names(&self) -> Vec<String> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
async fn table(&self, name: &str) -> datafusion::error::Result<Option<Arc<dyn TableProvider>>> {
|
||||
let table = self
|
||||
.catalog_manager
|
||||
.table(&self.catalog_name, &self.schema_name, name)
|
||||
.await?
|
||||
.with_context(|| TableNotExistSnafu {
|
||||
table: format_full_table_name(&self.catalog_name, &self.schema_name, name),
|
||||
})?;
|
||||
|
||||
let table_provider: Arc<dyn TableProvider> = Arc::new(DfTableProviderAdapter::new(table));
|
||||
|
||||
Ok(Some(table_provider))
|
||||
}
|
||||
|
||||
fn table_exist(&self, _name: &str) -> bool {
|
||||
true
|
||||
}
|
||||
}
|
||||
@@ -173,14 +173,14 @@ impl Client {
|
||||
Ok(FlightClient { addr, client })
|
||||
}
|
||||
|
||||
pub(crate) fn raw_region_client(&self) -> Result<(String, PbRegionClient<Channel>)> {
|
||||
let (addr, channel) = self.find_channel()?;
|
||||
pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
|
||||
let (_, channel) = self.find_channel()?;
|
||||
let client = PbRegionClient::new(channel)
|
||||
.max_decoding_message_size(self.max_grpc_recv_message_size())
|
||||
.max_encoding_message_size(self.max_grpc_send_message_size())
|
||||
.accept_compressed(CompressionEncoding::Zstd)
|
||||
.send_compressed(CompressionEncoding::Zstd);
|
||||
Ok((addr, client))
|
||||
Ok(client)
|
||||
}
|
||||
|
||||
pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
|
||||
|
||||
@@ -89,9 +89,8 @@ pub enum Error {
|
||||
source: common_grpc::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to request RegionServer {}, code: {}", addr, code))]
|
||||
#[snafu(display("Failed to request RegionServer, code: {}", code))]
|
||||
RegionServer {
|
||||
addr: String,
|
||||
code: Code,
|
||||
source: BoxedError,
|
||||
#[snafu(implicit)]
|
||||
|
||||
@@ -12,12 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(assert_matches)]
|
||||
|
||||
mod client;
|
||||
pub mod client_manager;
|
||||
#[cfg(feature = "testing")]
|
||||
mod database;
|
||||
pub mod error;
|
||||
pub mod load_balance;
|
||||
mod metrics;
|
||||
@@ -33,8 +29,6 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
|
||||
use snafu::OptionExt;
|
||||
|
||||
pub use self::client::Client;
|
||||
#[cfg(feature = "testing")]
|
||||
pub use self::database::Database;
|
||||
pub use self::error::{Error, Result};
|
||||
use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};
|
||||
|
||||
|
||||
@@ -177,7 +177,7 @@ impl RegionRequester {
|
||||
.with_label_values(&[request_type.as_str()])
|
||||
.start_timer();
|
||||
|
||||
let (addr, mut client) = self.client.raw_region_client()?;
|
||||
let mut client = self.client.raw_region_client()?;
|
||||
|
||||
let response = client
|
||||
.handle(request)
|
||||
@@ -187,7 +187,6 @@ impl RegionRequester {
|
||||
let err: error::Error = e.into();
|
||||
// Uses `Error::RegionServer` instead of `Error::Server`
|
||||
error::Error::RegionServer {
|
||||
addr,
|
||||
code,
|
||||
source: BoxedError::new(err),
|
||||
location: location!(),
|
||||
|
||||
@@ -80,7 +80,6 @@ tracing-appender = "0.2"
|
||||
tikv-jemallocator = "0.5"
|
||||
|
||||
[dev-dependencies]
|
||||
client = { workspace = true, features = ["testing"] }
|
||||
common-test-util.workspace = true
|
||||
serde.workspace = true
|
||||
temp-env = "0.3"
|
||||
|
||||
@@ -22,14 +22,18 @@ mod helper;

// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
#[allow(unused)]
mod repl;
// mod repl;
// TODO(weny): Removes it
#[allow(deprecated)]
mod upgrade;

use async_trait::async_trait;
use bench::BenchTableMetadataCommand;
use clap::Parser;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
pub use repl::Repl;
use tracing_appender::non_blocking::WorkerGuard;
// pub use repl::Repl;
use upgrade::UpgradeCommand;

use self::export::ExportCommand;
use crate::error::Result;
@@ -112,6 +116,7 @@ impl Command {
#[derive(Parser)]
enum SubCommand {
// Attach(AttachCommand),
Upgrade(UpgradeCommand),
Bench(BenchTableMetadataCommand),
Export(ExportCommand),
}
@@ -120,6 +125,7 @@ impl SubCommand {
async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
match self {
// SubCommand::Attach(cmd) => cmd.build().await,
SubCommand::Upgrade(cmd) => cmd.build(guard).await,
SubCommand::Bench(cmd) => cmd.build(guard).await,
SubCommand::Export(cmd) => cmd.build(guard).await,
}

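The wiring above follows the usual clap derive pattern: an enum whose variants are subcommands, dispatched from a single `build` method. A standalone sketch with placeholder variants (not the real cli tools):

use clap::Parser;

// Minimal stand-in for the dispatch pattern above.
#[derive(Parser)]
enum DemoSubCommand {
    /// Run the metadata bench tool.
    Bench,
    /// Export table schemas.
    Export,
}

impl DemoSubCommand {
    fn run(&self) {
        match self {
            DemoSubCommand::Bench => println!("bench"),
            DemoSubCommand::Export => println!("export"),
        }
    }
}

fn main() {
    // e.g. `cargo run -- bench`
    DemoSubCommand::parse_from(["demo", "bench"]).run();
}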
@@ -23,13 +23,13 @@ use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
use common_meta::table_name::TableName;
use common_telemetry::info;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema};
use rand::Rng;
use store_api::storage::RegionNumber;
use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};
use table::table_name::TableName;
use tracing_appender::non_blocking::WorkerGuard;

use self::metadata::TableMetadataBencher;

@@ -16,7 +16,7 @@ use std::time::Instant;

use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManagerRef;
use table::table_name::TableName;
use common_meta::table_name::TableName;

use crate::cli::bench::{
bench_self_recorded, create_region_routes, create_region_wal_options, create_table_info,

@@ -434,80 +434,3 @@ fn split_database(database: &str) -> Result<(String, Option<String>)> {
Ok((catalog.to_string(), Some(schema.to_string())))
}
}

#[cfg(test)]
mod tests {
use clap::Parser;
use client::{Client, Database};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_telemetry::logging::LoggingOptions;

use crate::error::Result as CmdResult;
use crate::options::GlobalOptions;
use crate::{cli, standalone, App};

#[tokio::test(flavor = "multi_thread")]
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
let output_dir = tempfile::tempdir().unwrap();

let standalone = standalone::Command::parse_from([
"standalone",
"start",
"--data-home",
&*output_dir.path().to_string_lossy(),
]);

let standalone_opts = standalone.load_options(&GlobalOptions::default()).unwrap();
let mut instance = standalone.build(standalone_opts).await?;
instance.start().await?;

let client = Client::with_urls(["127.0.0.1:4001"]);
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
database
.sql(r#"CREATE DATABASE "cli.export.create_table";"#)
.await
.unwrap();
database
.sql(
r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
ts TIMESTAMP,
TIME INDEX (ts)
) engine=mito;
"#,
)
.await
.unwrap();

let output_dir = tempfile::tempdir().unwrap();
let cli = cli::Command::parse_from([
"cli",
"export",
"--addr",
"127.0.0.1:4000",
"--output-dir",
&*output_dir.path().to_string_lossy(),
"--target",
"create-table",
]);
let mut cli_app = cli.build(LoggingOptions::default()).await?;
cli_app.start().await?;

instance.stop().await?;

let output_file = output_dir
.path()
.join("greptime-cli.export.create_table.sql");
let res = std::fs::read_to_string(output_file).unwrap();
let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
"ts" TIMESTAMP(3) NOT NULL,
TIME INDEX ("ts")
)

ENGINE=mito
;
"#;
assert_eq!(res.trim(), expect.trim());

Ok(())
}
}

@@ -16,18 +16,14 @@ use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;

use cache::{
build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
TABLE_ROUTE_CACHE_NAME,
};
use catalog::kvbackend::{
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager,
};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use client::{Client, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_config::Mode;
use common_error::ext::ErrorExt;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::cache_invalidator::MultiCacheInvalidator;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::debug;
@@ -37,18 +33,17 @@ use query::datafusion::DatafusionQueryEngine;
use query::logical_optimizer::LogicalOptimizer;
use query::parser::QueryLanguageParser;
use query::plan::LogicalPlan;
use query::query_engine::{DefaultSerializer, QueryEngineState};
use query::query_engine::QueryEngineState;
use query::QueryEngine;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use session::context::QueryContext;
use snafu::{OptionExt, ResultExt};
use snafu::ResultExt;
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};

use crate::cli::cmd::ReplCommand;
use crate::cli::helper::RustylineHelper;
use crate::cli::AttachCommand;
use crate::error;
use crate::error::{
CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
@@ -185,7 +180,7 @@ impl Repl {
.context(PlanStatementSnafu)?;

let plan = DFLogicalSubstraitConvertor {}
.encode(&plan, DefaultSerializer)
.encode(&plan)
.context(SubstraitEncodeLogicalPlanSnafu)?;

self.database.logical_plan(plan.to_vec()).await
@@ -262,30 +257,19 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {

let cached_meta_backend =
Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
CacheRegistryBuilder::default()
.add_cache(cached_meta_backend.clone())
.build(),
);
let fundamental_cache_registry =
build_fundamental_cache_registry(Arc::new(MetaKvBackend::new(meta_client.clone())));
let layered_cache_registry = Arc::new(
with_default_composite_cache_registry(
layered_cache_builder.add_cache_registry(fundamental_cache_registry),
)
.context(error::BuildCacheRegistrySnafu)?
.build(),
);

let catalog_manager = KvBackendCatalogManager::new(
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
cached_meta_backend.clone(),
]));
let catalog_list = KvBackendCatalogManager::new(
Mode::Distributed,
Some(meta_client.clone()),
cached_meta_backend.clone(),
layered_cache_registry,
);
multi_cache_invalidator,
)
.await;
let plugins: Plugins = Default::default();
let state = Arc::new(QueryEngineState::new(
catalog_manager,
catalog_list,
None,
None,
None,

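One side of the hunk above threads a single layered cache registry into `KvBackendCatalogManager::new` instead of a `MultiCacheInvalidator`. A toy model of the type-keyed lookup such a registry provides, only to illustrate why a later `registry.get()` can come back empty and needs the `CacheRequired` context seen elsewhere in this diff (the real `LayeredCacheRegistry` in common-meta is richer):

use std::any::{Any, TypeId};
use std::collections::HashMap;
use std::sync::Arc;

// Each layer maps a concrete cache type to one instance; `get` searches layers
// in insertion order and returns None when no layer registered that type.
#[derive(Default)]
struct ToyLayeredRegistry {
    layers: Vec<HashMap<TypeId, Arc<dyn Any + Send + Sync>>>,
}

impl ToyLayeredRegistry {
    fn add_layer<T: Any + Send + Sync>(&mut self, cache: Arc<T>) {
        let mut layer: HashMap<TypeId, Arc<dyn Any + Send + Sync>> = HashMap::new();
        layer.insert(TypeId::of::<T>(), cache);
        self.layers.push(layer);
    }

    fn get<T: Any + Send + Sync>(&self) -> Option<Arc<T>> {
        self.layers
            .iter()
            .find_map(|l| l.get(&TypeId::of::<T>()).cloned())
            .and_then(|c| c.downcast::<T>().ok())
    }
}

fn main() {
    let mut registry = ToyLayeredRegistry::default();
    registry.add_layer(Arc::new(String::from("table cache stand-in")));
    // Present: a String "cache" was registered in some layer.
    assert!(registry.get::<String>().is_some());
    // Absent: nothing registered for u64, mirroring the CacheRequired error path.
    assert!(registry.get::<u64>().is_none());
}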
584 src/cmd/src/cli/upgrade.rs (new file)
@@ -0,0 +1,584 @@
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use clap::Parser;
|
||||
use client::api::v1::meta::TableRouteValue;
|
||||
use common_meta::ddl::utils::region_storage_path;
|
||||
use common_meta::error as MetaError;
|
||||
use common_meta::key::catalog_name::{CatalogNameKey, CatalogNameValue};
|
||||
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue, RegionInfo};
|
||||
use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue};
|
||||
use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
|
||||
use common_meta::key::table_name::{TableNameKey, TableNameValue};
|
||||
use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
|
||||
use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
|
||||
use common_meta::key::{MetaKey, RegionDistribution, TableMetaValue};
|
||||
use common_meta::kv_backend::etcd::EtcdStore;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::range_stream::PaginationStream;
|
||||
use common_meta::rpc::router::TableRoute;
|
||||
use common_meta::rpc::store::{BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest};
|
||||
use common_meta::rpc::KeyValue;
|
||||
use common_meta::util::get_prefix_end_key;
|
||||
use common_telemetry::info;
|
||||
use etcd_client::Client;
|
||||
use futures::TryStreamExt;
|
||||
use prost::Message;
|
||||
use snafu::ResultExt;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
use v1_helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue};
|
||||
|
||||
use crate::cli::{Instance, Tool};
|
||||
use crate::error::{self, ConnectEtcdSnafu, Result};
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub struct UpgradeCommand {
|
||||
#[clap(long)]
|
||||
etcd_addr: String,
|
||||
#[clap(long)]
|
||||
dryrun: bool,
|
||||
|
||||
#[clap(long)]
|
||||
skip_table_global_keys: bool,
|
||||
#[clap(long)]
|
||||
skip_catalog_keys: bool,
|
||||
#[clap(long)]
|
||||
skip_schema_keys: bool,
|
||||
#[clap(long)]
|
||||
skip_table_route_keys: bool,
|
||||
}
|
||||
|
||||
impl UpgradeCommand {
|
||||
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
|
||||
let client = Client::connect([&self.etcd_addr], None)
|
||||
.await
|
||||
.context(ConnectEtcdSnafu {
|
||||
etcd_addr: &self.etcd_addr,
|
||||
})?;
|
||||
let tool = MigrateTableMetadata {
|
||||
etcd_store: EtcdStore::with_etcd_client(client, 128),
|
||||
dryrun: self.dryrun,
|
||||
skip_catalog_keys: self.skip_catalog_keys,
|
||||
skip_table_global_keys: self.skip_table_global_keys,
|
||||
skip_schema_keys: self.skip_schema_keys,
|
||||
skip_table_route_keys: self.skip_table_route_keys,
|
||||
};
|
||||
Ok(Instance::new(Box::new(tool), guard))
|
||||
}
|
||||
}
|
||||
|
||||
struct MigrateTableMetadata {
|
||||
etcd_store: KvBackendRef,
|
||||
dryrun: bool,
|
||||
|
||||
skip_table_global_keys: bool,
|
||||
|
||||
skip_catalog_keys: bool,
|
||||
|
||||
skip_schema_keys: bool,
|
||||
|
||||
skip_table_route_keys: bool,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Tool for MigrateTableMetadata {
|
||||
// Migrates the database's metadata from v0.3 to v0.4.
|
||||
async fn do_work(&self) -> Result<()> {
|
||||
if !self.skip_table_global_keys {
|
||||
self.migrate_table_global_values().await?;
|
||||
}
|
||||
if !self.skip_catalog_keys {
|
||||
self.migrate_catalog_keys().await?;
|
||||
}
|
||||
if !self.skip_schema_keys {
|
||||
self.migrate_schema_keys().await?;
|
||||
}
|
||||
if !self.skip_table_route_keys {
|
||||
self.migrate_table_route_keys().await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
const PAGE_SIZE: usize = 1000;
|
||||
|
||||
impl MigrateTableMetadata {
|
||||
async fn migrate_table_route_keys(&self) -> Result<()> {
|
||||
let key = b"__meta_table_route".to_vec();
|
||||
let range_end = get_prefix_end_key(&key);
|
||||
let mut keys = Vec::new();
|
||||
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
|
||||
|
||||
let mut stream = PaginationStream::new(
|
||||
self.etcd_store.clone(),
|
||||
RangeRequest::new().with_range(key, range_end),
|
||||
PAGE_SIZE,
|
||||
Arc::new(|kv: KeyValue| {
|
||||
let value =
|
||||
TableRouteValue::decode(&kv.value[..]).context(MetaError::DecodeProtoSnafu)?;
|
||||
Ok((kv.key, value))
|
||||
}),
|
||||
);
|
||||
|
||||
while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? {
|
||||
let table_id = self.migrate_table_route_key(value).await?;
|
||||
keys.push(key);
|
||||
keys.push(TableRegionKey::new(table_id).to_bytes())
|
||||
}
|
||||
|
||||
info!("Total migrated TableRouteKeys: {}", keys.len() / 2);
|
||||
self.delete_migrated_keys(keys).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn migrate_table_route_key(&self, value: TableRouteValue) -> Result<u32> {
|
||||
let table_route = TableRoute::try_from_raw(
|
||||
&value.peers,
|
||||
value.table_route.expect("expected table_route"),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let new_table_value = NextTableRouteValue::physical(table_route.region_routes);
|
||||
|
||||
let table_id = table_route.table.id as u32;
|
||||
let new_key = TableRouteKey::new(table_id);
|
||||
info!("Creating '{new_key}'");
|
||||
|
||||
if self.dryrun {
|
||||
info!("Dryrun: do nothing");
|
||||
} else {
|
||||
self.etcd_store
|
||||
.put(
|
||||
PutRequest::new()
|
||||
.with_key(new_key.to_bytes())
|
||||
.with_value(new_table_value.try_as_raw_value().unwrap()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
Ok(table_id)
|
||||
}
|
||||
|
||||
async fn migrate_schema_keys(&self) -> Result<()> {
|
||||
// The schema key prefix.
|
||||
let key = b"__s".to_vec();
|
||||
let range_end = get_prefix_end_key(&key);
|
||||
|
||||
let mut keys = Vec::new();
|
||||
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
|
||||
let mut stream = PaginationStream::new(
|
||||
self.etcd_store.clone(),
|
||||
RangeRequest::new().with_range(key, range_end),
|
||||
PAGE_SIZE,
|
||||
Arc::new(|kv: KeyValue| {
|
||||
let key_str =
|
||||
std::str::from_utf8(&kv.key).context(MetaError::ConvertRawKeySnafu)?;
|
||||
let key = v1SchemaKey::parse(key_str)
|
||||
.unwrap_or_else(|e| panic!("schema key is corrupted: {e}, key: {key_str}"));
|
||||
|
||||
Ok(key)
|
||||
}),
|
||||
);
|
||||
while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
|
||||
let _ = self.migrate_schema_key(&key).await;
|
||||
keys.push(key.to_string().as_bytes().to_vec());
|
||||
}
|
||||
info!("Total migrated SchemaKeys: {}", keys.len());
|
||||
self.delete_migrated_keys(keys).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn migrate_schema_key(&self, key: &v1SchemaKey) -> Result<()> {
|
||||
let new_key = SchemaNameKey::new(&key.catalog_name, &key.schema_name);
|
||||
let schema_name_value = SchemaNameValue::default();
|
||||
|
||||
info!("Creating '{new_key}'");
|
||||
|
||||
if self.dryrun {
|
||||
info!("Dryrun: do nothing");
|
||||
} else {
|
||||
self.etcd_store
|
||||
.put(
|
||||
PutRequest::new()
|
||||
.with_key(new_key.to_bytes())
|
||||
.with_value(schema_name_value.try_as_raw_value().unwrap()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn migrate_catalog_keys(&self) -> Result<()> {
|
||||
// The catalog key prefix.
|
||||
let key = b"__c".to_vec();
|
||||
let range_end = get_prefix_end_key(&key);
|
||||
|
||||
let mut keys = Vec::new();
|
||||
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
|
||||
let mut stream = PaginationStream::new(
|
||||
self.etcd_store.clone(),
|
||||
RangeRequest::new().with_range(key, range_end),
|
||||
PAGE_SIZE,
|
||||
Arc::new(|kv: KeyValue| {
|
||||
let key_str =
|
||||
std::str::from_utf8(&kv.key).context(MetaError::ConvertRawKeySnafu)?;
|
||||
let key = v1CatalogKey::parse(key_str)
|
||||
.unwrap_or_else(|e| panic!("catalog key is corrupted: {e}, key: {key_str}"));
|
||||
|
||||
Ok(key)
|
||||
}),
|
||||
);
|
||||
while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
|
||||
let _ = self.migrate_catalog_key(&key).await;
|
||||
keys.push(key.to_string().as_bytes().to_vec());
|
||||
}
|
||||
info!("Total migrated CatalogKeys: {}", keys.len());
|
||||
self.delete_migrated_keys(keys).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn migrate_catalog_key(&self, key: &v1CatalogKey) {
|
||||
let new_key = CatalogNameKey::new(&key.catalog_name);
|
||||
let catalog_name_value = CatalogNameValue;
|
||||
|
||||
info!("Creating '{new_key}'");
|
||||
|
||||
if self.dryrun {
|
||||
info!("Dryrun: do nothing");
|
||||
} else {
|
||||
self.etcd_store
|
||||
.put(
|
||||
PutRequest::new()
|
||||
.with_key(new_key.to_bytes())
|
||||
.with_value(catalog_name_value.try_as_raw_value().unwrap()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
async fn migrate_table_global_values(&self) -> Result<()> {
|
||||
let key = b"__tg".to_vec();
|
||||
let range_end = get_prefix_end_key(&key);
|
||||
|
||||
let mut keys = Vec::new();
|
||||
|
||||
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
|
||||
let mut stream = PaginationStream::new(
|
||||
self.etcd_store.clone(),
|
||||
RangeRequest::new().with_range(key, range_end.clone()),
|
||||
PAGE_SIZE,
|
||||
Arc::new(|kv: KeyValue| {
|
||||
let key = String::from_utf8_lossy(kv.key()).to_string();
|
||||
let value = TableGlobalValue::from_bytes(kv.value())
|
||||
.unwrap_or_else(|e| panic!("table global value is corrupted: {e}, key: {key}"));
|
||||
|
||||
Ok((key, value))
|
||||
}),
|
||||
);
|
||||
while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? {
|
||||
self.create_table_name_key(&value).await;
|
||||
|
||||
self.create_datanode_table_keys(&value).await;
|
||||
|
||||
self.split_table_global_value(&key, value).await;
|
||||
|
||||
keys.push(key.as_bytes().to_vec());
|
||||
}
|
||||
|
||||
info!("Total migrated TableGlobalKeys: {}", keys.len());
|
||||
self.delete_migrated_keys(keys).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_migrated_keys(&self, keys: Vec<Vec<u8>>) {
|
||||
for keys in keys.chunks(PAGE_SIZE) {
|
||||
info!("Deleting {} keys", keys.len());
|
||||
let req = BatchDeleteRequest {
|
||||
keys: keys.to_vec(),
|
||||
prev_kv: false,
|
||||
};
|
||||
if self.dryrun {
|
||||
info!("Dryrun: do nothing");
|
||||
} else {
|
||||
self.etcd_store.batch_delete(req).await.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn split_table_global_value(&self, key: &str, value: TableGlobalValue) {
|
||||
let table_id = value.table_id();
|
||||
let region_distribution: RegionDistribution = value.regions_id_map.into_iter().collect();
|
||||
|
||||
let table_info_key = TableInfoKey::new(table_id);
|
||||
let table_info_value = TableInfoValue::new(value.table_info);
|
||||
|
||||
let table_region_key = TableRegionKey::new(table_id);
|
||||
let table_region_value = TableRegionValue::new(region_distribution);
|
||||
|
||||
info!("Splitting TableGlobalKey '{key}' into '{table_info_key}' and '{table_region_key}'");
|
||||
|
||||
if self.dryrun {
|
||||
info!("Dryrun: do nothing");
|
||||
} else {
|
||||
self.etcd_store
|
||||
.batch_put(
|
||||
BatchPutRequest::new()
|
||||
.add_kv(
|
||||
table_info_key.to_bytes(),
|
||||
table_info_value.try_as_raw_value().unwrap(),
|
||||
)
|
||||
.add_kv(
|
||||
table_region_key.to_bytes(),
|
||||
table_region_value.try_as_raw_value().unwrap(),
|
||||
),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
async fn create_table_name_key(&self, value: &TableGlobalValue) {
|
||||
let table_info = &value.table_info;
|
||||
let table_id = value.table_id();
|
||||
|
||||
let table_name_key = TableNameKey::new(
|
||||
&table_info.catalog_name,
|
||||
&table_info.schema_name,
|
||||
&table_info.name,
|
||||
);
|
||||
let table_name_value = TableNameValue::new(table_id);
|
||||
|
||||
info!("Creating '{table_name_key}' => {table_id}");
|
||||
|
||||
if self.dryrun {
|
||||
info!("Dryrun: do nothing");
|
||||
} else {
|
||||
self.etcd_store
|
||||
.put(
|
||||
PutRequest::new()
|
||||
.with_key(table_name_key.to_bytes())
|
||||
.with_value(table_name_value.try_as_raw_value().unwrap()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
async fn create_datanode_table_keys(&self, value: &TableGlobalValue) {
|
||||
let table_id = value.table_id();
|
||||
let engine = value.table_info.meta.engine.as_str();
|
||||
let region_storage_path = region_storage_path(
|
||||
&value.table_info.catalog_name,
|
||||
&value.table_info.schema_name,
|
||||
);
|
||||
let region_distribution: RegionDistribution =
|
||||
value.regions_id_map.clone().into_iter().collect();
|
||||
|
||||
// TODO(niebayes): properly fetch or construct wal options.
|
||||
let region_wal_options = HashMap::default();
|
||||
|
||||
let datanode_table_kvs = region_distribution
|
||||
.into_iter()
|
||||
.map(|(datanode_id, regions)| {
|
||||
let k = DatanodeTableKey::new(datanode_id, table_id);
|
||||
info!("Creating DatanodeTableKey '{k}' => {regions:?}");
|
||||
(
|
||||
k,
|
||||
DatanodeTableValue::new(
|
||||
table_id,
|
||||
regions,
|
||||
RegionInfo {
|
||||
engine: engine.to_string(),
|
||||
region_storage_path: region_storage_path.clone(),
|
||||
region_options: (&value.table_info.meta.options).into(),
|
||||
region_wal_options: region_wal_options.clone(),
|
||||
},
|
||||
),
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
if self.dryrun {
|
||||
info!("Dryrun: do nothing");
|
||||
} else {
|
||||
let mut req = BatchPutRequest::new();
|
||||
for (key, value) in datanode_table_kvs {
|
||||
req = req.add_kv(key.to_bytes(), value.try_as_raw_value().unwrap());
|
||||
}
|
||||
self.etcd_store.batch_put(req).await.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[deprecated(since = "0.4.0", note = "Used for migrate old version(v0.3) metadata")]
|
||||
mod v1_helper {
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{Display, Formatter};
|
||||
|
||||
use err::{DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu};
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
|
||||
pub const CATALOG_KEY_PREFIX: &str = "__c";
|
||||
pub const SCHEMA_KEY_PREFIX: &str = "__s";
|
||||
|
||||
/// The pattern of a valid catalog, schema or table name.
|
||||
const NAME_PATTERN: &str = "[a-zA-Z_:][a-zA-Z0-9_:]*";
|
||||
|
||||
lazy_static! {
|
||||
static ref CATALOG_KEY_PATTERN: Regex =
|
||||
Regex::new(&format!("^{CATALOG_KEY_PREFIX}-({NAME_PATTERN})$")).unwrap();
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
|
||||
"^{SCHEMA_KEY_PREFIX}-({NAME_PATTERN})-({NAME_PATTERN})$"
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
/// Table global info contains necessary info for a datanode to create table regions, including
|
||||
/// table id, table meta(schema...), region id allocation across datanodes.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct TableGlobalValue {
|
||||
/// Id of the datanode that created the global table info kv. Only for debugging.
|
||||
pub node_id: u64,
|
||||
/// Allocation of region ids across all datanodes.
|
||||
pub regions_id_map: HashMap<u64, Vec<u32>>,
|
||||
pub table_info: RawTableInfo,
|
||||
}
|
||||
|
||||
impl TableGlobalValue {
|
||||
pub fn table_id(&self) -> TableId {
|
||||
self.table_info.ident.table_id
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CatalogKey {
|
||||
pub catalog_name: String,
|
||||
}
|
||||
|
||||
impl Display for CatalogKey {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(CATALOG_KEY_PREFIX)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.catalog_name)
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogKey {
|
||||
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
|
||||
let key = s.as_ref();
|
||||
let captures = CATALOG_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidCatalogSnafu { key })?;
|
||||
ensure!(captures.len() == 2, InvalidCatalogSnafu { key });
|
||||
Ok(Self {
|
||||
catalog_name: captures[1].to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct CatalogValue;
|
||||
|
||||
pub struct SchemaKey {
|
||||
pub catalog_name: String,
|
||||
pub schema_name: String,
|
||||
}
|
||||
|
||||
impl Display for SchemaKey {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(SCHEMA_KEY_PREFIX)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.catalog_name)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.schema_name)
|
||||
}
|
||||
}
|
||||
|
||||
impl SchemaKey {
|
||||
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
|
||||
let key = s.as_ref();
|
||||
let captures = SCHEMA_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidCatalogSnafu { key })?;
|
||||
ensure!(captures.len() == 3, InvalidCatalogSnafu { key });
|
||||
Ok(Self {
|
||||
catalog_name: captures[1].to_string(),
|
||||
schema_name: captures[2].to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct SchemaValue;
|
||||
|
||||
macro_rules! define_catalog_value {
|
||||
( $($val_ty: ty), *) => {
|
||||
$(
|
||||
impl $val_ty {
|
||||
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
|
||||
serde_json::from_str(s.as_ref())
|
||||
.context(DeserializeCatalogEntryValueSnafu { raw: s.as_ref() })
|
||||
}
|
||||
|
||||
pub fn from_bytes(bytes: impl AsRef<[u8]>) -> Result<Self, Error> {
|
||||
Self::parse(&String::from_utf8_lossy(bytes.as_ref()))
|
||||
}
|
||||
}
|
||||
)*
|
||||
}
|
||||
}
|
||||
|
||||
define_catalog_value!(TableGlobalValue);
|
||||
|
||||
mod err {
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
pub enum Error {
|
||||
#[snafu(display("Invalid catalog info: {}", key))]
|
||||
InvalidCatalog {
|
||||
key: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to deserialize catalog entry value: {}", raw))]
|
||||
DeserializeCatalogEntryValue {
|
||||
raw: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: serde_json::error::Error,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
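The `v1_helper` module above recognises legacy keys with anchored regexes. A quick self-contained check of the key shapes it accepts, using the same `NAME_PATTERN` and only the `regex` crate:

use regex::Regex;

fn main() {
    // Copied from v1_helper: a name starts with [a-zA-Z_:] and continues with [a-zA-Z0-9_:].
    let name_pattern = "[a-zA-Z_:][a-zA-Z0-9_:]*";
    let catalog_key = Regex::new(&format!("^__c-({name_pattern})$")).unwrap();
    let schema_key = Regex::new(&format!("^__s-({name_pattern})-({name_pattern})$")).unwrap();

    let caps = catalog_key.captures("__c-greptime").unwrap();
    assert_eq!(&caps[1], "greptime");

    let caps = schema_key.captures("__s-greptime-public").unwrap();
    assert_eq!((&caps[1], &caps[2]), ("greptime", "public"));

    // Keys that do not match are treated as corrupted during migration.
    assert!(catalog_key.captures("__c-").is_none());
}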
@@ -163,15 +163,6 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to request database, sql: {sql}"))]
|
||||
RequestDatabase {
|
||||
sql: String,
|
||||
#[snafu(source)]
|
||||
source: client::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to collect RecordBatches"))]
|
||||
CollectRecordBatches {
|
||||
#[snafu(implicit)]
|
||||
@@ -363,7 +354,6 @@ impl ErrorExt for Error {
|
||||
Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
|
||||
StatusCode::Internal
|
||||
}
|
||||
Error::RequestDatabase { source, .. } => source.status_code(),
|
||||
Error::CollectRecordBatches { source, .. }
|
||||
| Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
|
||||
Error::StartMetaClient { source, .. } => source.status_code(),
|
||||
@@ -375,11 +365,11 @@ impl ErrorExt for Error {
|
||||
|
||||
Error::SerdeJson { .. } | Error::FileIo { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
|
||||
|
||||
Error::Other { source, .. } => source.status_code(),
|
||||
|
||||
Error::BuildRuntime { source, .. } => source.status_code(),
|
||||
|
||||
Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -16,7 +16,10 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use cache::{
|
||||
build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
|
||||
TABLE_ROUTE_CACHE_NAME,
|
||||
};
|
||||
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
|
||||
use clap::Parser;
|
||||
use client::client_manager::DatanodeClients;
|
||||
@@ -299,12 +302,25 @@ impl StartCommand {
|
||||
.build(),
|
||||
);
|
||||
|
||||
let table_cache = layered_cache_registry
|
||||
.get()
|
||||
.context(error::CacheRequiredSnafu {
|
||||
name: TABLE_CACHE_NAME,
|
||||
})?;
|
||||
let table_route_cache =
|
||||
layered_cache_registry
|
||||
.get()
|
||||
.context(error::CacheRequiredSnafu {
|
||||
name: TABLE_ROUTE_CACHE_NAME,
|
||||
})?;
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
opts.mode,
|
||||
Some(meta_client.clone()),
|
||||
cached_meta_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
);
|
||||
table_cache,
|
||||
table_route_cache,
|
||||
)
|
||||
.await;
|
||||
|
||||
let executor = HandlerGroupExecutor::new(vec![
|
||||
Arc::new(ParseMailboxMessageHandler),
|
||||
|
||||
@@ -16,7 +16,10 @@ use std::sync::Arc;
|
||||
use std::{fs, path};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use cache::{
|
||||
build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
|
||||
TABLE_ROUTE_CACHE_NAME,
|
||||
};
|
||||
use catalog::kvbackend::KvBackendCatalogManager;
|
||||
use clap::Parser;
|
||||
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
|
||||
@@ -58,14 +61,14 @@ use servers::export_metrics::ExportMetricsOption;
|
||||
use servers::http::HttpOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{
|
||||
BuildCacheRegistrySnafu, CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu,
|
||||
InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, Result, ShutdownDatanodeSnafu,
|
||||
ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
|
||||
StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
|
||||
BuildCacheRegistrySnafu, CacheRequiredSnafu, CreateDirSnafu, IllegalConfigSnafu,
|
||||
InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, Result,
|
||||
ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
|
||||
StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
|
||||
};
|
||||
use crate::options::GlobalOptions;
|
||||
use crate::{log_versions, App};
|
||||
@@ -418,12 +421,20 @@ impl StartCommand {
|
||||
.build(),
|
||||
);
|
||||
|
||||
let table_cache = layered_cache_registry.get().context(CacheRequiredSnafu {
|
||||
name: TABLE_CACHE_NAME,
|
||||
})?;
|
||||
let table_route_cache = layered_cache_registry.get().context(CacheRequiredSnafu {
|
||||
name: TABLE_ROUTE_CACHE_NAME,
|
||||
})?;
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
dn_opts.mode,
|
||||
None,
|
||||
kv_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
);
|
||||
table_cache,
|
||||
table_route_cache,
|
||||
)
|
||||
.await;
|
||||
|
||||
let table_metadata_manager =
|
||||
Self::create_table_metadata_manager(kv_backend.clone()).await?;
|
||||
|
||||
@@ -143,6 +143,8 @@ fn clamp_impl<T: LogicalPrimitiveType, const CLAMP_MIN: bool, const CLAMP_MAX: b
min: T::Native,
max: T::Native,
) -> Result<VectorRef> {
common_telemetry::info!("[DEBUG] min {min:?}, max {max:?}");

let iter = ArrayIter::new(input);
let result = iter.map(|x| {
x.map(|x| {

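The hunk above only adds a temporary debug log to `clamp_impl`; the kernel itself maps every non-null element through a min/max clamp. A simplified, slice-based model of that per-element behaviour (not the actual arrow-based implementation):

// Apply min/max to each non-null value of a column modelled as Option<T>,
// leaving nulls untouched.
fn clamp_column<T: PartialOrd + Copy>(input: &[Option<T>], min: T, max: T) -> Vec<Option<T>> {
    input
        .iter()
        .map(|v| {
            v.map(|x| {
                if x < min {
                    min
                } else if x > max {
                    max
                } else {
                    x
                }
            })
        })
        .collect()
}

fn main() {
    let out = clamp_column(&[Some(-5), None, Some(3), Some(42)], 0, 10);
    assert_eq!(out, vec![Some(0), None, Some(3), Some(10)]);
}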
@@ -25,7 +25,7 @@ prost.workspace = true
snafu.workspace = true
tokio.workspace = true
tonic.workspace = true
tower.workspace = true
tower = "0.4"

[dev-dependencies]
criterion = "0.4"

@@ -24,7 +24,7 @@ pub use registry::{
LayeredCacheRegistryBuilder, LayeredCacheRegistryRef,
};
pub use table::{
new_table_info_cache, new_table_name_cache, new_table_route_cache, new_view_info_cache,
TableInfoCache, TableInfoCacheRef, TableNameCache, TableNameCacheRef, TableRoute,
TableRouteCache, TableRouteCacheRef, ViewInfoCache, ViewInfoCacheRef,
new_table_info_cache, new_table_name_cache, new_table_route_cache, TableInfoCache,
TableInfoCacheRef, TableNameCache, TableNameCacheRef, TableRoute, TableRouteCache,
TableRouteCacheRef,
};

@@ -145,13 +145,13 @@ mod tests {

use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use moka::future::CacheBuilder;
use table::table_name::TableName;

use crate::cache::flow::table_flownode::new_table_flownode_set_cache;
use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::FlowMetadataManager;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::table_name::TableName;

#[tokio::test]
async fn test_cache_empty_set() {

3 src/common/meta/src/cache/table.rs vendored
@@ -15,9 +15,6 @@
mod table_info;
mod table_name;
mod table_route;
mod view_info;

pub use table_info::{new_table_info_cache, TableInfoCache, TableInfoCacheRef};
pub use table_name::{new_table_name_cache, TableNameCache, TableNameCacheRef};
pub use table_route::{new_table_route_cache, TableRoute, TableRouteCache, TableRouteCacheRef};
pub use view_info::{new_view_info_cache, ViewInfoCache, ViewInfoCacheRef};

@@ -18,7 +18,6 @@ use futures::future::BoxFuture;
use moka::future::Cache;
use snafu::OptionExt;
use table::metadata::TableId;
use table::table_name::TableName;

use crate::cache::{CacheContainer, Initializer};
use crate::error;
@@ -26,6 +25,7 @@ use crate::error::Result;
use crate::instruction::CacheIdent;
use crate::key::table_name::{TableNameKey, TableNameManager, TableNameManagerRef};
use crate::kv_backend::KvBackendRef;
use crate::table_name::TableName;

/// [TableNameCache] caches the [TableName] to [TableId] mapping.
pub type TableNameCache = CacheContainer<TableName, TableId, CacheIdent>;

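`CacheContainer`, used for `TableNameCache` above, pairs a moka cache with an initializer that fills misses from the kv backend, an invalidator, and a filter over invalidation idents. A dependency-free toy model of that wiring, assuming synchronous lookups for brevity (the real filter inspects `CacheIdent`s; here it filters on keys directly):

use std::collections::HashMap;

// Toy CacheContainer: `init` loads on miss, `filter` decides whether an
// invalidation request applies, `invalidate` evicts matching keys.
struct ToyCacheContainer<K, V> {
    map: HashMap<K, V>,
    init: fn(&K) -> Option<V>,
    filter: fn(&K) -> bool,
}

impl<K: std::hash::Hash + Eq + Clone, V: Clone> ToyCacheContainer<K, V> {
    fn get(&mut self, key: &K) -> Option<V> {
        if let Some(v) = self.map.get(key) {
            return Some(v.clone());
        }
        let v = (self.init)(key)?;
        self.map.insert(key.clone(), v.clone());
        Some(v)
    }

    fn invalidate(&mut self, key: &K) {
        if (self.filter)(key) {
            self.map.remove(key);
        }
    }
}

fn main() {
    let mut cache = ToyCacheContainer {
        map: HashMap::new(),
        // Stand-in for the TableNameManager lookup in the real initializer.
        init: |name: &String| Some(name.len() as u32),
        filter: |_| true,
    };
    assert_eq!(cache.get(&"my_table".to_string()), Some(8));
    cache.invalidate(&"my_table".to_string());
    assert!(cache.map.is_empty());
}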
143 src/common/meta/src/cache/table/view_info.rs vendored (deleted file)
@@ -1,143 +0,0 @@
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::future::BoxFuture;
|
||||
use moka::future::Cache;
|
||||
use snafu::OptionExt;
|
||||
use store_api::storage::TableId;
|
||||
|
||||
use crate::cache::{CacheContainer, Initializer};
|
||||
use crate::error;
|
||||
use crate::error::Result;
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::view_info::{ViewInfoManager, ViewInfoManagerRef, ViewInfoValue};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
|
||||
/// [ViewInfoCache] caches the [TableId] to [ViewInfoValue] mapping.
|
||||
pub type ViewInfoCache = CacheContainer<TableId, Arc<ViewInfoValue>, CacheIdent>;
|
||||
|
||||
pub type ViewInfoCacheRef = Arc<ViewInfoCache>;
|
||||
|
||||
/// Constructs a [ViewInfoCache].
|
||||
pub fn new_view_info_cache(
|
||||
name: String,
|
||||
cache: Cache<TableId, Arc<ViewInfoValue>>,
|
||||
kv_backend: KvBackendRef,
|
||||
) -> ViewInfoCache {
|
||||
let view_info_manager = Arc::new(ViewInfoManager::new(kv_backend));
|
||||
let init = init_factory(view_info_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
}
|
||||
|
||||
fn init_factory(view_info_manager: ViewInfoManagerRef) -> Initializer<TableId, Arc<ViewInfoValue>> {
|
||||
Arc::new(move |view_id| {
|
||||
let view_info_manager = view_info_manager.clone();
|
||||
Box::pin(async move {
|
||||
let view_info = view_info_manager
|
||||
.get(*view_id)
|
||||
.await?
|
||||
.context(error::ValueNotExistSnafu {})?
|
||||
.into_inner();
|
||||
|
||||
Ok(Some(Arc::new(view_info)))
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
fn invalidator<'a>(
|
||||
cache: &'a Cache<TableId, Arc<ViewInfoValue>>,
|
||||
ident: &'a CacheIdent,
|
||||
) -> BoxFuture<'a, Result<()>> {
|
||||
Box::pin(async move {
|
||||
if let CacheIdent::TableId(table_id) = ident {
|
||||
cache.invalidate(table_id).await
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
fn filter(ident: &CacheIdent) -> bool {
|
||||
matches!(ident, CacheIdent::TableId(_))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
use moka::future::CacheBuilder;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use super::*;
|
||||
use crate::ddl::tests::create_view::test_create_view_task;
|
||||
use crate::key::TableMetadataManager;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_view_info_cache() {
|
||||
let mem_kv = Arc::new(MemoryKvBackend::default());
|
||||
let table_metadata_manager = TableMetadataManager::new(mem_kv.clone());
|
||||
let cache = CacheBuilder::new(128).build();
|
||||
let cache = new_view_info_cache("test".to_string(), cache, mem_kv.clone());
|
||||
|
||||
let result = cache.get(1024).await.unwrap();
|
||||
assert!(result.is_none());
|
||||
let mut task = test_create_view_task("my_view");
|
||||
let table_names = {
|
||||
let mut set = HashSet::new();
|
||||
set.insert(TableName {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "a_table".to_string(),
|
||||
});
|
||||
set.insert(TableName {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "b_table".to_string(),
|
||||
});
|
||||
set
|
||||
};
|
||||
|
||||
task.view_info.ident.table_id = 1024;
|
||||
table_metadata_manager
|
||||
.create_view_metadata(
|
||||
task.view_info.clone(),
|
||||
task.create_view.logical_plan.clone(),
|
||||
table_names,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let view_info = cache.get(1024).await.unwrap().unwrap();
|
||||
assert_eq!(view_info.view_info, task.create_view.logical_plan);
|
||||
assert_eq!(
|
||||
view_info.table_names,
|
||||
task.create_view
|
||||
.table_names
|
||||
.iter()
|
||||
.map(|t| t.clone().into())
|
||||
.collect::<HashSet<_>>()
|
||||
);
|
||||
|
||||
assert!(cache.contains_key(&1024));
|
||||
cache
|
||||
.invalidate(&[CacheIdent::TableId(1024)])
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(!cache.contains_key(&1024));
|
||||
}
|
||||
}
|
||||
@@ -48,7 +48,7 @@ pub mod table_meta;
#[cfg(any(test, feature = "testing"))]
pub mod test_util;
#[cfg(test)]
pub(crate) mod tests;
mod tests;
pub mod truncate_table;
pub mod utils;

@@ -13,10 +13,10 @@
|
||||
// limitations under the License.
|
||||
|
||||
use table::metadata::RawTableInfo;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::table_name::TableName;
|
||||
|
||||
impl AlterLogicalTablesProcedure {
|
||||
pub(crate) fn build_table_cache_keys_to_invalidate(&self) -> Vec<CacheIdent> {
|
||||
|
||||
@@ -18,13 +18,13 @@ use common_telemetry::{info, warn};
|
||||
use itertools::Itertools;
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::TableId;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::cache_invalidator::Context;
|
||||
use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
|
||||
use crate::ddl::physical_table_metadata;
|
||||
use crate::error::{Result, TableInfoNotFoundSnafu};
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::table_name::TableName;
|
||||
|
||||
impl CreateLogicalTablesProcedure {
|
||||
pub(crate) async fn update_physical_table_metadata(&mut self) -> Result<()> {
|
||||
|
||||
@@ -22,11 +22,9 @@ use strum::AsRefStr;
|
||||
use table::metadata::{RawTableInfo, TableId, TableType};
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::cache_invalidator::Context;
|
||||
use crate::ddl::utils::handle_retry_error;
|
||||
use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
|
||||
use crate::error::{self, Result};
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
|
||||
use crate::rpc::ddl::CreateViewTask;
|
||||
@@ -159,25 +157,6 @@ impl CreateViewProcedure {
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
async fn invalidate_view_cache(&self) -> Result<()> {
|
||||
let cache_invalidator = &self.context.cache_invalidator;
|
||||
let ctx = Context {
|
||||
subject: Some("Invalidate view cache by creating view".to_string()),
|
||||
};
|
||||
|
||||
cache_invalidator
|
||||
.invalidate(
|
||||
&ctx,
|
||||
&[
|
||||
CacheIdent::TableName(self.data.table_ref().into()),
|
||||
CacheIdent::TableId(self.view_id()),
|
||||
],
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Creates view metadata
|
||||
///
|
||||
/// Abort(not-retry):
|
||||
@@ -196,21 +175,15 @@ impl CreateViewProcedure {
|
||||
view_name: self.data.table_ref().to_string(),
|
||||
})?;
|
||||
let new_logical_plan = self.data.task.raw_logical_plan().clone();
|
||||
let table_names = self.data.task.table_names();
|
||||
|
||||
manager
|
||||
.update_view_info(view_id, ¤t_view_info, new_logical_plan, table_names)
|
||||
.update_view_info(view_id, ¤t_view_info, new_logical_plan)
|
||||
.await?;
|
||||
|
||||
info!("Updated view metadata for view {view_id}");
|
||||
} else {
|
||||
let raw_view_info = self.view_info().clone();
|
||||
manager
|
||||
.create_view_metadata(
|
||||
raw_view_info,
|
||||
self.data.task.raw_logical_plan().clone(),
|
||||
self.data.task.table_names(),
|
||||
)
|
||||
.create_view_metadata(raw_view_info, self.data.task.raw_logical_plan())
|
||||
.await?;
|
||||
|
||||
info!(
|
||||
@@ -218,7 +191,6 @@ impl CreateViewProcedure {
|
||||
ctx.procedure_id
|
||||
);
|
||||
}
|
||||
self.invalidate_view_cache().await?;
|
||||
|
||||
Ok(Status::done_with_output(view_id))
|
||||
}
|
||||
|
||||
@@ -14,23 +14,19 @@
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_procedure::Status;
|
||||
use futures::TryStreamExt;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::{TableId, TableType};
|
||||
use table::table_name::TableName;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use super::executor::DropDatabaseExecutor;
|
||||
use super::metadata::DropDatabaseRemoveMetadata;
|
||||
use super::DropTableTarget;
|
||||
use crate::cache_invalidator::Context;
|
||||
use crate::ddl::drop_database::{DropDatabaseContext, State};
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::{Result, TableInfoNotFoundSnafu};
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::error::Result;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::table_name::TableName;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub(crate) struct DropDatabaseCursor {
|
||||
@@ -105,40 +101,6 @@ impl DropDatabaseCursor {
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_view(
|
||||
&self,
|
||||
ddl_ctx: &DdlContext,
|
||||
ctx: &mut DropDatabaseContext,
|
||||
table_name: String,
|
||||
table_id: TableId,
|
||||
) -> Result<(Box<dyn State>, Status)> {
|
||||
let view_name = TableName::new(&ctx.catalog, &ctx.schema, &table_name);
|
||||
ddl_ctx
|
||||
.table_metadata_manager
|
||||
.destroy_view_info(table_id, &view_name)
|
||||
.await?;
|
||||
|
||||
let cache_invalidator = &ddl_ctx.cache_invalidator;
|
||||
let ctx = Context {
|
||||
subject: Some("Invalidate table cache by dropping table".to_string()),
|
||||
};
|
||||
|
||||
cache_invalidator
|
||||
.invalidate(
|
||||
&ctx,
|
||||
&[
|
||||
CacheIdent::TableName(view_name),
|
||||
CacheIdent::TableId(table_id),
|
||||
],
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok((
|
||||
Box::new(DropDatabaseCursor::new(self.target)),
|
||||
Status::executing(false),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -160,20 +122,6 @@ impl State for DropDatabaseCursor {
|
||||
match ctx.tables.as_mut().unwrap().try_next().await? {
|
||||
Some((table_name, table_name_value)) => {
|
||||
let table_id = table_name_value.table_id();
|
||||
|
||||
let table_info_value = ddl_ctx
|
||||
.table_metadata_manager
|
||||
.table_info_manager()
|
||||
.get(table_id)
|
||||
.await?
|
||||
.with_context(|| TableInfoNotFoundSnafu {
|
||||
table: format_full_table_name(&ctx.catalog, &ctx.schema, &table_name),
|
||||
})?;
|
||||
|
||||
if table_info_value.table_info.table_type == TableType::View {
|
||||
return self.handle_view(ddl_ctx, ctx, table_name, table_id).await;
|
||||
}
|
||||
|
||||
match ddl_ctx
|
||||
.table_metadata_manager
|
||||
.table_route_manager()
|
||||
|
||||
@@ -19,7 +19,6 @@ use common_telemetry::info;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::TableId;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use super::cursor::DropDatabaseCursor;
|
||||
use super::{DropDatabaseContext, DropTableTarget};
|
||||
@@ -30,6 +29,7 @@ use crate::error::{self, Result};
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::region_keeper::OperatingRegionGuard;
|
||||
use crate::rpc::router::{operating_leader_regions, RegionRoute};
|
||||
use crate::table_name::TableName;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub(crate) struct DropDatabaseExecutor {
|
||||
@@ -135,7 +135,6 @@ mod tests {
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::ddl::drop_database::cursor::DropDatabaseCursor;
|
||||
use crate::ddl::drop_database::executor::DropDatabaseExecutor;
|
||||
@@ -145,6 +144,7 @@ mod tests {
|
||||
use crate::key::datanode_table::DatanodeTableKey;
|
||||
use crate::peer::Peer;
|
||||
use crate::rpc::router::region_distribution;
|
||||
use crate::table_name::TableName;
|
||||
use crate::test_util::{new_ddl_context, MockDatanodeHandler, MockDatanodeManager};
|
||||
|
||||
#[derive(Clone)]
|
||||
|
||||
@@ -23,7 +23,6 @@ use futures::future::join_all;
|
||||
use snafu::ensure;
|
||||
use store_api::storage::RegionId;
|
||||
use table::metadata::TableId;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::cache_invalidator::Context;
|
||||
use crate::ddl::utils::add_peer_context_if_needed;
|
||||
@@ -33,6 +32,7 @@ use crate::instruction::CacheIdent;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
|
||||
use crate::table_name::TableName;
|
||||
|
||||
/// [Control] indicated to the caller whether to go to the next step.
|
||||
#[derive(Debug)]
|
||||
@@ -224,7 +224,6 @@ mod tests {
|
||||
use api::v1::{ColumnDataType, SemanticType};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use table::metadata::RawTableInfo;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use super::*;
|
||||
use crate::ddl::test_util::columns::TestColumnDefBuilder;
|
||||
@@ -232,6 +231,7 @@ mod tests {
|
||||
build_raw_table_info_from_expr, TestCreateTableExprBuilder,
|
||||
};
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::table_name::TableName;
|
||||
use crate::test_util::{new_ddl_context, MockDatanodeManager};
|
||||
|
||||
fn test_create_raw_table_info(name: &str) -> RawTableInfo {
|
||||
|
||||
@@ -17,7 +17,7 @@ mod alter_table;
|
||||
mod create_flow;
|
||||
mod create_logical_tables;
|
||||
mod create_table;
|
||||
pub(crate) mod create_view;
|
||||
mod create_view;
|
||||
mod drop_database;
|
||||
mod drop_flow;
|
||||
mod drop_table;
|
||||
|
||||
@@ -19,7 +19,6 @@ use std::sync::Arc;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_procedure_test::execute_procedure_until_done;
|
||||
use session::context::QueryContext;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::ddl::create_flow::CreateFlowProcedure;
|
||||
use crate::ddl::test_util::create_table::test_create_table_task;
|
||||
@@ -28,6 +27,7 @@ use crate::ddl::DdlContext;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::FlowId;
|
||||
use crate::rpc::ddl::CreateFlowTask;
|
||||
use crate::table_name::TableName;
|
||||
use crate::test_util::{new_ddl_context, MockFlownodeManager};
|
||||
use crate::{error, ClusterId};
|
||||
|
||||
|
||||
@@ -13,10 +13,9 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::{CreateViewExpr, TableName};
|
||||
use api::v1::CreateViewExpr;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId, Status};
|
||||
@@ -32,35 +31,7 @@ use crate::error::Error;
|
||||
use crate::rpc::ddl::CreateViewTask;
|
||||
use crate::test_util::{new_ddl_context, MockDatanodeManager};
|
||||
|
||||
fn test_table_names() -> HashSet<table::table_name::TableName> {
|
||||
let mut set = HashSet::new();
|
||||
set.insert(table::table_name::TableName {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "a_table".to_string(),
|
||||
});
|
||||
set.insert(table::table_name::TableName {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "b_table".to_string(),
|
||||
});
|
||||
set
|
||||
}
|
||||
|
||||
pub(crate) fn test_create_view_task(name: &str) -> CreateViewTask {
|
||||
let table_names = vec![
|
||||
TableName {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "a_table".to_string(),
|
||||
},
|
||||
TableName {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "b_table".to_string(),
|
||||
},
|
||||
];
|
||||
|
||||
fn test_create_view_task(name: &str) -> CreateViewTask {
|
||||
let expr = CreateViewExpr {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
@@ -68,7 +39,6 @@ pub(crate) fn test_create_view_task(name: &str) -> CreateViewTask {
|
||||
or_replace: false,
|
||||
create_if_not_exists: false,
|
||||
logical_plan: vec![1, 2, 3],
|
||||
table_names,
|
||||
};
|
||||
|
||||
let view_info = RawTableInfo {
|
||||
@@ -100,11 +70,7 @@ async fn test_on_prepare_view_exists_err() {
|
||||
// Puts a value to table name key.
|
||||
ddl_context
|
||||
.table_metadata_manager
|
||||
.create_view_metadata(
|
||||
task.view_info.clone(),
|
||||
task.create_view.logical_plan.clone(),
|
||||
test_table_names(),
|
||||
)
|
||||
.create_view_metadata(task.view_info.clone(), &task.create_view.logical_plan)
|
||||
.await
|
||||
.unwrap();
|
||||
let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
|
||||
@@ -124,11 +90,7 @@ async fn test_on_prepare_with_create_if_view_exists() {
|
||||
// Puts a value to table name key.
|
||||
ddl_context
|
||||
.table_metadata_manager
|
||||
.create_view_metadata(
|
||||
task.view_info.clone(),
|
||||
task.create_view.logical_plan.clone(),
|
||||
test_table_names(),
|
||||
)
|
||||
.create_view_metadata(task.view_info.clone(), &task.create_view.logical_plan)
|
||||
.await
|
||||
.unwrap();
|
||||
let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
|
||||
|
||||
@@ -18,7 +18,6 @@ use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_procedure_test::execute_procedure_until_done;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::ddl::drop_flow::DropFlowProcedure;
|
||||
use crate::ddl::test_util::create_table::test_create_table_task;
|
||||
@@ -27,6 +26,7 @@ use crate::ddl::tests::create_flow::create_test_flow;
|
||||
use crate::error;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::rpc::ddl::DropFlowTask;
|
||||
use crate::table_name::TableName;
|
||||
use crate::test_util::{new_ddl_context, MockFlownodeManager};
|
||||
|
||||
fn test_drop_flow_task(flow_name: &str, flow_id: u32, drop_if_exists: bool) -> DropFlowTask {
|
||||
|
||||
@@ -28,7 +28,6 @@ use snafu::{ensure, ResultExt};
|
||||
use store_api::storage::RegionId;
|
||||
use strum::AsRefStr;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
use table::table_name::TableName;
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use super::utils::handle_retry_error;
|
||||
@@ -41,6 +40,7 @@ use crate::key::DeserializedValueWithBytes;
|
||||
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
|
||||
use crate::rpc::ddl::TruncateTableTask;
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
|
||||
use crate::table_name::TableName;
|
||||
use crate::{metrics, ClusterId};
|
||||
|
||||
pub struct TruncateTableProcedure {
|
||||
|
||||
@@ -489,7 +489,8 @@ async fn handle_create_table_task(
|
||||
|
||||
Ok(SubmitDdlTaskResponse {
|
||||
key: procedure_id.into(),
|
||||
table_ids: vec![table_id],
|
||||
table_id: Some(table_id),
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
@@ -533,6 +534,7 @@ async fn handle_create_logical_table_tasks(
|
||||
Ok(SubmitDdlTaskResponse {
|
||||
key: procedure_id.into(),
|
||||
table_ids,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
@@ -688,7 +690,8 @@ async fn handle_create_view_task(
|
||||
|
||||
Ok(SubmitDdlTaskResponse {
|
||||
key: procedure_id.into(),
|
||||
table_ids: vec![view_id],
|
||||
table_id: Some(view_id),
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -20,11 +20,11 @@ use serde::{Deserialize, Serialize};
|
||||
use store_api::storage::{RegionId, RegionNumber};
|
||||
use strum::Display;
|
||||
use table::metadata::TableId;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::flow_name::FlowName;
|
||||
use crate::key::schema_name::SchemaName;
|
||||
use crate::key::FlowId;
|
||||
use crate::table_name::TableName;
|
||||
use crate::{ClusterId, DatanodeId, FlownodeId};
|
||||
|
||||
#[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)]
|
||||
|
||||
@@ -89,6 +89,9 @@ pub mod flow;
|
||||
pub mod schema_name;
|
||||
pub mod table_info;
|
||||
pub mod table_name;
|
||||
// TODO(weny): removes it.
|
||||
#[allow(deprecated)]
|
||||
pub mod table_region;
|
||||
pub mod view_info;
|
||||
// TODO(weny): removes it.
|
||||
#[allow(deprecated)]
|
||||
@@ -116,7 +119,6 @@ use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::storage::RegionNumber;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
use table::table_name::TableName;
|
||||
use table_info::{TableInfoKey, TableInfoManager, TableInfoValue};
|
||||
use table_name::{TableNameKey, TableNameManager, TableNameValue};
|
||||
use view_info::{ViewInfoKey, ViewInfoManager, ViewInfoValue};
|
||||
@@ -136,12 +138,14 @@ use crate::kv_backend::txn::{Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::router::{region_distribution, RegionRoute, RegionStatus};
|
||||
use crate::rpc::store::BatchDeleteRequest;
|
||||
use crate::table_name::TableName;
|
||||
use crate::DatanodeId;
|
||||
|
||||
pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
|
||||
pub const MAINTENANCE_KEY: &str = "maintenance";
|
||||
|
||||
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
|
||||
const TABLE_REGION_KEY_PREFIX: &str = "__table_region";
|
||||
pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
|
||||
pub const VIEW_INFO_KEY_PREFIX: &str = "__view_info";
|
||||
pub const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
|
||||
@@ -486,8 +490,7 @@ impl TableMetadataManager {
pub async fn create_view_metadata(
&self,
view_info: RawTableInfo,
raw_logical_plan: Vec<u8>,
table_names: HashSet<TableName>,
raw_logical_plan: &Vec<u8>,
) -> Result<()> {
let view_id = view_info.ident.table_id;

@@ -509,7 +512,7 @@ impl TableMetadataManager {
.build_create_txn(view_id, &table_info_value)?;

// Creates view info
let view_info_value = ViewInfoValue::new(raw_logical_plan, table_names);
let view_info_value = ViewInfoValue::new(raw_logical_plan);
let (create_view_info_txn, on_create_view_info_failure) = self
.view_info_manager()
.build_create_txn(view_id, &view_info_value)?;
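The hunk above is the core of the view-metadata API difference between the two branches: one side persists the encoded logical plan together with the set of resolved table names, the other persists only the plan bytes. A minimal, self-contained sketch of the two call shapes, using stub types rather than the real `TableMetadataManager`:

```rust
// Sketch only: stand-in types, not the real GreptimeDB API.
use std::collections::HashSet;

#[derive(Debug, Clone, Hash, PartialEq, Eq)]
struct TableName {
    catalog_name: String,
    schema_name: String,
    table_name: String,
}

struct ViewMetadata;

impl ViewMetadata {
    // Variant that records the plan plus the tables it references.
    fn create_with_names(&self, plan: Vec<u8>, table_names: HashSet<TableName>) {
        println!("plan: {} bytes, {} referenced tables", plan.len(), table_names.len());
    }

    // Variant that records the plan bytes only.
    fn create(&self, plan: &[u8]) {
        println!("plan: {} bytes", plan.len());
    }
}

fn main() {
    let m = ViewMetadata;
    let plan = vec![1u8, 2, 3];
    m.create(&plan);
    m.create_with_names(plan, HashSet::new());
}
```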
@@ -801,33 +804,6 @@ impl TableMetadataManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn view_info_keys(&self, view_id: TableId, view_name: &TableName) -> Result<Vec<Vec<u8>>> {
|
||||
let mut keys = Vec::with_capacity(3);
|
||||
let view_name = TableNameKey::new(
|
||||
&view_name.catalog_name,
|
||||
&view_name.schema_name,
|
||||
&view_name.table_name,
|
||||
);
|
||||
let table_info_key = TableInfoKey::new(view_id);
|
||||
let view_info_key = ViewInfoKey::new(view_id);
|
||||
keys.push(view_name.to_bytes());
|
||||
keys.push(table_info_key.to_bytes());
|
||||
keys.push(view_info_key.to_bytes());
|
||||
|
||||
Ok(keys)
|
||||
}
|
||||
|
||||
/// Deletes metadata for view **permanently**.
|
||||
/// The caller MUST ensure it has the exclusive access to `ViewNameKey`.
|
||||
pub async fn destroy_view_info(&self, view_id: TableId, view_name: &TableName) -> Result<()> {
|
||||
let keys = self.view_info_keys(view_id, view_name)?;
|
||||
let _ = self
|
||||
.kv_backend
|
||||
.batch_delete(BatchDeleteRequest::new().with_keys(keys))
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Renames the table name and returns an error if different metadata exists.
|
||||
/// The caller MUST ensure it has the exclusive access to old and new `TableNameKey`s,
|
||||
/// and the new `TableNameKey` MUST be empty.
|
||||
@@ -927,9 +903,8 @@ impl TableMetadataManager {
view_id: TableId,
current_view_info_value: &DeserializedValueWithBytes<ViewInfoValue>,
new_view_info: Vec<u8>,
table_names: HashSet<TableName>,
) -> Result<()> {
let new_view_info_value = current_view_info_value.update(new_view_info, table_names);
let new_view_info_value = current_view_info_value.update(new_view_info);

// Updates view info.
let (update_view_info_txn, on_update_view_info_failure) = self
@@ -1199,7 +1174,7 @@ impl_optional_meta_value! {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::sync::Arc;
|
||||
|
||||
use bytes::Bytes;
|
||||
@@ -1208,7 +1183,6 @@ mod tests {
|
||||
use futures::TryStreamExt;
|
||||
use store_api::storage::RegionId;
|
||||
use table::metadata::{RawTableInfo, TableInfo};
|
||||
use table::table_name::TableName;
|
||||
|
||||
use super::datanode_table::DatanodeTableKey;
|
||||
use super::test_utils;
|
||||
@@ -1223,6 +1197,7 @@ mod tests {
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
use crate::peer::Peer;
|
||||
use crate::rpc::router::{region_distribution, Region, RegionRoute, RegionStatus};
|
||||
use crate::table_name::TableName;
|
||||
|
||||
#[test]
|
||||
fn test_deserialized_value_with_bytes() {
|
||||
@@ -1275,21 +1250,6 @@ mod tests {
|
||||
test_utils::new_test_table_info(10, region_numbers)
|
||||
}
|
||||
|
||||
fn new_test_table_names() -> HashSet<TableName> {
|
||||
let mut set = HashSet::new();
|
||||
set.insert(TableName {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "a_table".to_string(),
|
||||
});
|
||||
set.insert(TableName {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "b_table".to_string(),
|
||||
});
|
||||
set
|
||||
}
|
||||
|
||||
async fn create_physical_table_metadata(
|
||||
table_metadata_manager: &TableMetadataManager,
|
||||
table_info: RawTableInfo,
|
||||
@@ -2001,11 +1961,9 @@ mod tests {
|
||||
|
||||
let logical_plan: Vec<u8> = vec![1, 2, 3];
|
||||
|
||||
let table_names = new_test_table_names();
|
||||
|
||||
// Create metadata
|
||||
table_metadata_manager
|
||||
.create_view_metadata(view_info.clone(), logical_plan.clone(), table_names.clone())
|
||||
.create_view_metadata(view_info.clone(), &logical_plan)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -2019,7 +1977,6 @@ mod tests {
|
||||
.unwrap()
|
||||
.into_inner();
|
||||
assert_eq!(current_view_info.view_info, logical_plan);
|
||||
assert_eq!(current_view_info.table_names, table_names);
|
||||
// assert table info
|
||||
let current_table_info = table_metadata_manager
|
||||
.table_info_manager()
|
||||
@@ -2032,43 +1989,16 @@ mod tests {
|
||||
}
|
||||
|
||||
let new_logical_plan: Vec<u8> = vec![4, 5, 6];
|
||||
let new_table_names = {
|
||||
let mut set = HashSet::new();
|
||||
set.insert(TableName {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "b_table".to_string(),
|
||||
});
|
||||
set.insert(TableName {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "c_table".to_string(),
|
||||
});
|
||||
set
|
||||
};
|
||||
|
||||
let current_view_info_value = DeserializedValueWithBytes::from_inner(ViewInfoValue::new(
|
||||
logical_plan.clone(),
|
||||
table_names,
|
||||
));
|
||||
let current_view_info_value =
|
||||
DeserializedValueWithBytes::from_inner(ViewInfoValue::new(&logical_plan));
|
||||
// should be ok.
|
||||
table_metadata_manager
|
||||
.update_view_info(
|
||||
view_id,
|
||||
¤t_view_info_value,
|
||||
new_logical_plan.clone(),
|
||||
new_table_names.clone(),
|
||||
)
|
||||
.update_view_info(view_id, ¤t_view_info_value, new_logical_plan.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
// if table info was updated, it should be ok.
|
||||
table_metadata_manager
|
||||
.update_view_info(
|
||||
view_id,
|
||||
¤t_view_info_value,
|
||||
new_logical_plan.clone(),
|
||||
new_table_names.clone(),
|
||||
)
|
||||
.update_view_info(view_id, ¤t_view_info_value, new_logical_plan.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -2081,21 +2011,14 @@ mod tests {
|
||||
.unwrap()
|
||||
.into_inner();
|
||||
assert_eq!(updated_view_info.view_info, new_logical_plan);
|
||||
assert_eq!(updated_view_info.table_names, new_table_names);
|
||||
|
||||
let wrong_view_info = logical_plan.clone();
|
||||
let wrong_view_info_value = DeserializedValueWithBytes::from_inner(
|
||||
current_view_info_value.update(wrong_view_info, new_table_names.clone()),
|
||||
);
|
||||
let wrong_view_info_value =
|
||||
DeserializedValueWithBytes::from_inner(current_view_info_value.update(wrong_view_info));
|
||||
// if the current_view_info_value is wrong, it should return an error.
|
||||
// The ABA problem.
|
||||
assert!(table_metadata_manager
|
||||
.update_view_info(
|
||||
view_id,
|
||||
&wrong_view_info_value,
|
||||
new_logical_plan.clone(),
|
||||
new_table_names.clone(),
|
||||
)
|
||||
.update_view_info(view_id, &wrong_view_info_value, new_logical_plan.clone())
|
||||
.await
|
||||
.is_err());
|
||||
|
||||
@@ -2108,6 +2031,5 @@ mod tests {
|
||||
.unwrap()
|
||||
.into_inner();
|
||||
assert_eq!(current_view_info.view_info, new_logical_plan);
|
||||
assert_eq!(current_view_info.table_names, new_table_names);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,8 +72,12 @@ impl DatanodeTableKey {
}
}

pub fn prefix(datanode_id: DatanodeId) -> String {
format!("{}/{datanode_id}/", DATANODE_TABLE_KEY_PREFIX)
fn prefix(datanode_id: DatanodeId) -> String {
format!("{}/{datanode_id}", DATANODE_TABLE_KEY_PREFIX)
}

pub fn range_start_key(datanode_id: DatanodeId) -> String {
format!("{}/", Self::prefix(datanode_id))
}
}

@@ -110,7 +114,7 @@ impl<'a> MetaKey<'a, DatanodeTableKey> for DatanodeTableKey {

impl Display for DatanodeTableKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}{}", Self::prefix(self.datanode_id), self.table_id)
write!(f, "{}/{}", Self::prefix(self.datanode_id), self.table_id)
}
}

@@ -160,7 +164,7 @@ impl DatanodeTableManager {
&self,
datanode_id: DatanodeId,
) -> BoxStream<'static, Result<DatanodeTableValue>> {
let start_key = DatanodeTableKey::prefix(datanode_id);
let start_key = DatanodeTableKey::range_start_key(datanode_id);
let req = RangeRequest::new().with_prefix(start_key.as_bytes());

let stream = PaginationStream::new(

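The hunks above move the trailing `/` out of `prefix` and into a dedicated `range_start_key`, which is what the prefix scan in `tables()` now uses. The separator matters because a raw numeric prefix is ambiguous: `__dn_table/1` is also a byte prefix of keys that belong to datanode 11. A small self-contained sketch of that point, using plain strings rather than the real key types:

```rust
// Sketch only: demonstrates why the range-start key needs the trailing '/'.
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";

fn prefix(datanode_id: u64) -> String {
    format!("{}/{}", DATANODE_TABLE_KEY_PREFIX, datanode_id)
}

fn range_start_key(datanode_id: u64) -> String {
    format!("{}/", prefix(datanode_id))
}

fn main() {
    let keys = ["__dn_table/1/42", "__dn_table/11/42"];

    // Without the separator, datanode 1 accidentally matches datanode 11's key.
    let loose: Vec<_> = keys.iter().filter(|k| k.starts_with(&prefix(1))).collect();
    assert_eq!(loose.len(), 2);

    // With the trailing '/', only datanode 1's key matches.
    let exact: Vec<_> = keys
        .iter()
        .filter(|k| k.starts_with(&range_start_key(1)))
        .collect();
    assert_eq!(exact.len(), 1);
}
```

The same prefix-versus-range-start-key split appears below for `FlownodeFlowKey` and `TableFlowKey`.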
@@ -262,12 +262,12 @@ mod tests {
|
||||
|
||||
use futures::TryStreamExt;
|
||||
use table::metadata::TableId;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use super::*;
|
||||
use crate::key::flow::table_flow::TableFlowKey;
|
||||
use crate::key::FlowPartitionId;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
use crate::table_name::TableName;
|
||||
use crate::FlownodeId;
|
||||
|
||||
#[derive(Debug)]
|
||||
|
||||
@@ -20,7 +20,6 @@ use regex::Regex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::TableId;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::flow::FlowScoped;
|
||||
@@ -28,6 +27,7 @@ use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::table_name::TableName;
|
||||
use crate::FlownodeId;
|
||||
|
||||
const FLOW_INFO_KEY_PREFIX: &str = "info";
|
||||
|
||||
@@ -69,7 +69,8 @@ impl FlownodeFlowKey {
|
||||
|
||||
/// The prefix used to retrieve all [FlownodeFlowKey]s with the specified `flownode_id`.
|
||||
pub fn range_start_key(flownode_id: FlownodeId) -> Vec<u8> {
|
||||
let inner = BytesAdapter::from(FlownodeFlowKeyInner::prefix(flownode_id).into_bytes());
|
||||
let inner =
|
||||
BytesAdapter::from(FlownodeFlowKeyInner::range_start_key(flownode_id).into_bytes());
|
||||
|
||||
FlowScoped::new(inner).to_bytes()
|
||||
}
|
||||
@@ -107,8 +108,13 @@ impl FlownodeFlowKeyInner {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn prefix(flownode_id: FlownodeId) -> String {
|
||||
format!("{}/{flownode_id}/", FLOWNODE_FLOW_KEY_PREFIX)
|
||||
fn prefix(flownode_id: FlownodeId) -> String {
|
||||
format!("{}/{flownode_id}", FLOWNODE_FLOW_KEY_PREFIX)
|
||||
}
|
||||
|
||||
/// The prefix used to retrieve all [FlownodeFlowKey]s with the specified `flownode_id`.
|
||||
fn range_start_key(flownode_id: FlownodeId) -> String {
|
||||
format!("{}/", Self::prefix(flownode_id))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -80,7 +80,7 @@ impl TableFlowKey {

/// The prefix used to retrieve all [TableFlowKey]s with the specified `table_id`.
pub fn range_start_key(table_id: TableId) -> Vec<u8> {
let inner = BytesAdapter::from(TableFlowKeyInner::prefix(table_id).into_bytes());
let inner = BytesAdapter::from(TableFlowKeyInner::range_start_key(table_id).into_bytes());

FlowScoped::new(inner).to_bytes()
}
@@ -123,7 +123,12 @@ impl TableFlowKeyInner {
}

fn prefix(table_id: TableId) -> String {
format!("{}/{table_id}/", TABLE_FLOW_KEY_PREFIX)
format!("{}/{table_id}", TABLE_FLOW_KEY_PREFIX)
}

/// The prefix used to retrieve all [TableFlowKey]s with the specified `table_id`.
fn range_start_key(table_id: TableId) -> String {
format!("{}/", Self::prefix(table_id))
}
}

@@ -19,7 +19,6 @@ use std::sync::Arc;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
use table::table_name::TableName;
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use super::TABLE_INFO_KEY_PATTERN;
|
||||
@@ -29,6 +28,7 @@ use crate::key::{DeserializedValueWithBytes, MetaKey, TableMetaValue, TABLE_INFO
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::BatchGetRequest;
|
||||
use crate::table_name::TableName;
|
||||
|
||||
/// The key stores the metadata of the table.
|
||||
///
|
||||
|
||||
@@ -20,7 +20,6 @@ use futures_util::stream::BoxStream;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::TableId;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use super::{MetaKey, TableMetaValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
|
||||
use crate::error::{Error, InvalidTableMetadataSnafu, Result};
|
||||
@@ -30,6 +29,7 @@ use crate::kv_backend::KvBackendRef;
|
||||
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
|
||||
use crate::rpc::store::{BatchGetRequest, RangeRequest};
|
||||
use crate::rpc::KeyValue;
|
||||
use crate::table_name::TableName;
|
||||
|
||||
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
|
||||
pub struct TableNameKey<'a> {
|
||||
@@ -48,7 +48,7 @@ impl<'a> TableNameKey<'a> {
}

pub fn prefix_to_table(catalog: &str, schema: &str) -> String {
format!("{}/{}/{}/", TABLE_NAME_KEY_PREFIX, catalog, schema)
format!("{}/{}/{}", TABLE_NAME_KEY_PREFIX, catalog, schema)
}
}

@@ -56,7 +56,7 @@ impl Display for TableNameKey<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}{}",
"{}/{}",
Self::prefix_to_table(self.catalog, self.schema),
self.table
)
@@ -268,11 +268,7 @@ impl TableNameManager {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use futures::StreamExt;
|
||||
|
||||
use super::*;
|
||||
use crate::kv_backend::KvBackend;
|
||||
use crate::rpc::store::PutRequest;
|
||||
|
||||
#[test]
|
||||
fn test_strip_table_name() {
|
||||
@@ -328,39 +324,4 @@ mod tests {
|
||||
assert_eq!(value.try_as_raw_value().unwrap(), literal);
|
||||
assert_eq!(TableNameValue::try_from_raw_value(literal).unwrap(), value);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_prefix_scan_tables() {
|
||||
let memory_kv = Arc::new(MemoryKvBackend::<crate::error::Error>::new());
|
||||
memory_kv
|
||||
.put(PutRequest {
|
||||
key: TableNameKey {
|
||||
catalog: "greptime",
|
||||
schema: "👉",
|
||||
table: "t",
|
||||
}
|
||||
.to_bytes(),
|
||||
value: vec![],
|
||||
prev_kv: false,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
memory_kv
|
||||
.put(PutRequest {
|
||||
key: TableNameKey {
|
||||
catalog: "greptime",
|
||||
schema: "👉👈",
|
||||
table: "t",
|
||||
}
|
||||
.to_bytes(),
|
||||
value: vec![],
|
||||
prev_kv: false,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let manager = TableNameManager::new(memory_kv);
|
||||
let items = manager.tables("greptime", "👉").collect::<Vec<_>>().await;
|
||||
assert_eq!(items.len(), 1);
|
||||
}
|
||||
}
|
||||
|
||||
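The `test_prefix_scan_tables` case above exercises the same separator issue at the schema level: at the byte level `"👉"` is a prefix of `"👉👈"`, so a scan for schema `👉` would also return tables under `👉👈` unless the key terminates the schema segment with `/`. A standalone illustration of that, using a hand-rolled helper in place of the real `TableNameKey`:

```rust
// Sketch only: shows why the schema prefix must be terminated with '/'.
fn prefix_to_table(catalog: &str, schema: &str) -> String {
    format!("__table_name/{}/{}/", catalog, schema)
}

fn main() {
    let k1 = format!("{}{}", prefix_to_table("greptime", "👉"), "t");
    let k2 = format!("{}{}", prefix_to_table("greptime", "👉👈"), "t");

    // "👉" alone is a byte prefix of "👉👈", so an unterminated prefix matches both keys.
    assert!(k2.starts_with("__table_name/greptime/👉"));

    // The terminated prefix only matches the intended schema.
    let p = prefix_to_table("greptime", "👉");
    assert!(k1.starts_with(&p));
    assert!(!k2.starts_with(&p));
}
```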
130
src/common/meta/src/key/table_region.rs
Normal file
@@ -0,0 +1,130 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
use std::fmt::Display;
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::RegionNumber;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use super::{MetaKey, TABLE_REGION_KEY_PREFIX};
|
||||
use crate::error::{InvalidTableMetadataSnafu, Result, SerdeJsonSnafu};
|
||||
use crate::{impl_table_meta_value, DatanodeId};
|
||||
|
||||
pub type RegionDistribution = BTreeMap<DatanodeId, Vec<RegionNumber>>;
|
||||
|
||||
#[deprecated(
|
||||
since = "0.4.0",
|
||||
note = "Please use the TableRouteManager's get_region_distribution method instead"
|
||||
)]
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct TableRegionKey {
|
||||
table_id: TableId,
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref TABLE_REGION_KEY_PATTERN: Regex =
|
||||
Regex::new(&format!("^{TABLE_REGION_KEY_PREFIX}/([0-9]+)$")).unwrap();
|
||||
}
|
||||
|
||||
impl TableRegionKey {
|
||||
pub fn new(table_id: TableId) -> Self {
|
||||
Self { table_id }
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for TableRegionKey {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}/{}", TABLE_REGION_KEY_PREFIX, self.table_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MetaKey<'a, TableRegionKey> for TableRegionKey {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.to_string().into_bytes()
|
||||
}
|
||||
|
||||
fn from_bytes(bytes: &'a [u8]) -> Result<TableRegionKey> {
|
||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
||||
InvalidTableMetadataSnafu {
|
||||
err_msg: format!(
|
||||
"TableRegionKey '{}' is not a valid UTF8 string: {e}",
|
||||
String::from_utf8_lossy(bytes)
|
||||
),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
let captures =
|
||||
TABLE_REGION_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidTableMetadataSnafu {
|
||||
err_msg: format!("Invalid TableRegionKey '{key}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
let table_id = captures[1].parse::<TableId>().unwrap();
|
||||
Ok(TableRegionKey { table_id })
|
||||
}
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
since = "0.4.0",
|
||||
note = "Please use the TableRouteManager's get_region_distribution method instead"
|
||||
)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub struct TableRegionValue {
|
||||
pub region_distribution: RegionDistribution,
|
||||
version: u64,
|
||||
}
|
||||
|
||||
impl TableRegionValue {
|
||||
pub fn new(region_distribution: RegionDistribution) -> Self {
|
||||
Self {
|
||||
region_distribution,
|
||||
version: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl_table_meta_value! {TableRegionValue}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::key::TableMetaValue;
|
||||
|
||||
#[test]
|
||||
fn test_serialization() {
|
||||
let key = TableRegionKey::new(24);
|
||||
let raw_key = key.to_bytes();
|
||||
assert_eq!(raw_key, b"__table_region/24");
|
||||
let deserialized = TableRegionKey::from_bytes(b"__table_region/24").unwrap();
|
||||
assert_eq!(key, deserialized);
|
||||
|
||||
let value = TableRegionValue {
|
||||
region_distribution: RegionDistribution::from([(1, vec![1, 2, 3]), (2, vec![4, 5, 6])]),
|
||||
version: 0,
|
||||
};
|
||||
let literal = br#"{"region_distribution":{"1":[1,2,3],"2":[4,5,6]},"version":0}"#;
|
||||
|
||||
assert_eq!(value.try_as_raw_value().unwrap(), literal);
|
||||
assert_eq!(
|
||||
TableRegionValue::try_from_raw_value(literal).unwrap(),
|
||||
value,
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -12,14 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::TableId;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use super::VIEW_INFO_KEY_PATTERN;
|
||||
use crate::error::{InvalidViewInfoSnafu, Result};
|
||||
@@ -82,30 +80,21 @@ impl<'a> MetaKey<'a, ViewInfoKey> for ViewInfoKey {
/// The VIEW info value that keeps the metadata.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ViewInfoValue {
/// The encoded logical plan
pub view_info: RawViewLogicalPlan,
/// The resolved fully table names in logical plan
pub table_names: HashSet<TableName>,
version: u64,
}

impl ViewInfoValue {
pub fn new(view_info: RawViewLogicalPlan, table_names: HashSet<TableName>) -> Self {
pub fn new(view_info: &RawViewLogicalPlan) -> Self {
Self {
view_info,
table_names,
view_info: view_info.clone(),
version: 0,
}
}

pub(crate) fn update(
&self,
new_view_info: RawViewLogicalPlan,
table_names: HashSet<TableName>,
) -> Self {
pub(crate) fn update(&self, new_view_info: RawViewLogicalPlan) -> Self {
Self {
view_info: new_view_info,
table_names,
version: self.version + 1,
}
}
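Whichever side of the comparison is taken, `ViewInfoValue` keeps a `version` counter that `update` bumps, and that counter is what lets `update_view_info` reject a stale (ABA) value in the tests earlier in this diff. A minimal sketch of that mechanism with stand-in types:

```rust
// Sketch only: stand-in for the real ViewInfoValue, showing the version bump.
#[derive(Debug, Clone, PartialEq)]
struct ViewInfoValue {
    view_info: Vec<u8>, // encoded logical plan
    version: u64,
}

impl ViewInfoValue {
    fn new(view_info: Vec<u8>) -> Self {
        Self { view_info, version: 0 }
    }

    fn update(&self, new_view_info: Vec<u8>) -> Self {
        Self {
            view_info: new_view_info,
            version: self.version + 1,
        }
    }
}

fn main() {
    let stored = ViewInfoValue::new(vec![1, 2, 3]);
    let updated = stored.update(vec![4, 5, 6]);

    // A caller still holding the original value (version 0) is detectably
    // stale even if the plan bytes look plausible: its version no longer
    // matches what is stored.
    assert_eq!(updated.version, 1);
    assert_ne!(stored.version, updated.version);
}
```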
@@ -116,8 +105,6 @@ pub struct ViewInfoManager {
|
||||
kv_backend: KvBackendRef,
|
||||
}
|
||||
|
||||
pub type ViewInfoManagerRef = Arc<ViewInfoManager>;
|
||||
|
||||
impl ViewInfoManager {
|
||||
pub fn new(kv_backend: KvBackendRef) -> Self {
|
||||
Self { kv_backend }
|
||||
@@ -267,25 +254,9 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_value_serialization() {
|
||||
let table_names = {
|
||||
let mut set = HashSet::new();
|
||||
set.insert(TableName {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "a_table".to_string(),
|
||||
});
|
||||
set.insert(TableName {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "b_table".to_string(),
|
||||
});
|
||||
set
|
||||
};
|
||||
|
||||
let value = ViewInfoValue {
|
||||
view_info: vec![1, 2, 3],
|
||||
version: 1,
|
||||
table_names,
|
||||
};
|
||||
let serialized = value.try_as_raw_value().unwrap();
|
||||
let deserialized = ViewInfoValue::try_from_raw_value(&serialized).unwrap();
|
||||
|
||||
@@ -40,6 +40,7 @@ pub mod region_keeper;
|
||||
pub mod rpc;
|
||||
pub mod sequence;
|
||||
pub mod state_store;
|
||||
pub mod table_name;
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub mod test_util;
|
||||
pub mod util;
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::collections::HashMap;
|
||||
use std::result;
|
||||
|
||||
use api::v1::meta::ddl_task_request::Task;
|
||||
@@ -39,11 +39,11 @@ use serde_with::{serde_as, DefaultOnNull};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
use table::table_name::TableName;
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::FlowId;
|
||||
use crate::table_name::TableName;
|
||||
|
||||
/// DDL tasks
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -274,7 +274,10 @@ impl TryFrom<SubmitDdlTaskRequest> for PbDdlTaskRequest {
#[derive(Debug, Default)]
pub struct SubmitDdlTaskResponse {
pub key: Vec<u8>,
// `table_id`s for `CREATE TABLE` or `CREATE LOGICAL TABLES` task.
// For create physical table
// TODO(jeremy): remove it?
pub table_id: Option<TableId>,
// For create multi logical tables
pub table_ids: Vec<TableId>,
}

@@ -282,9 +285,11 @@ impl TryFrom<PbDdlTaskResponse> for SubmitDdlTaskResponse {
type Error = error::Error;

fn try_from(resp: PbDdlTaskResponse) -> Result<Self> {
let table_id = resp.table_id.map(|t| t.id);
let table_ids = resp.table_ids.into_iter().map(|t| t.id).collect();
Ok(Self {
key: resp.pid.map(|pid| pid.key).unwrap_or_default(),
table_id,
table_ids,
})
}
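As the hunk above shows, the response now carries both the single `table_id` of a physical `CREATE TABLE` and the `table_ids` of a batch of logical tables, mapped straight out of the protobuf response. A stub sketch of that mapping with local stand-ins for the protobuf types (not the real `PbDdlTaskResponse`):

```rust
// Sketch only: local stand-ins for the protobuf response types.
struct PbTableId { id: u32 }
struct PbDdlTaskResponse {
    table_id: Option<PbTableId>,
    table_ids: Vec<PbTableId>,
}

#[derive(Debug, Default)]
struct SubmitDdlTaskResponse {
    table_id: Option<u32>,
    table_ids: Vec<u32>,
}

impl From<PbDdlTaskResponse> for SubmitDdlTaskResponse {
    fn from(resp: PbDdlTaskResponse) -> Self {
        Self {
            table_id: resp.table_id.map(|t| t.id),
            table_ids: resp.table_ids.into_iter().map(|t| t.id).collect(),
        }
    }
}

fn main() {
    let resp = PbDdlTaskResponse {
        table_id: Some(PbTableId { id: 1024 }),
        table_ids: vec![PbTableId { id: 1025 }, PbTableId { id: 1026 }],
    };
    let out = SubmitDdlTaskResponse::from(resp);
    assert_eq!(out.table_id, Some(1024));
    assert_eq!(out.table_ids, vec![1025, 1026]);
}
```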
@@ -294,6 +299,9 @@ impl From<SubmitDdlTaskResponse> for PbDdlTaskResponse {
|
||||
fn from(val: SubmitDdlTaskResponse) -> Self {
|
||||
Self {
|
||||
pid: Some(ProcedureId { key: val.key }),
|
||||
table_id: val
|
||||
.table_id
|
||||
.map(|table_id| api::v1::TableId { id: table_id }),
|
||||
table_ids: val
|
||||
.table_ids
|
||||
.into_iter()
|
||||
@@ -324,14 +332,6 @@ impl CreateViewTask {
|
||||
pub fn raw_logical_plan(&self) -> &Vec<u8> {
|
||||
&self.create_view.logical_plan
|
||||
}
|
||||
|
||||
pub fn table_names(&self) -> HashSet<TableName> {
|
||||
self.create_view
|
||||
.table_names
|
||||
.iter()
|
||||
.map(|t| t.clone().into())
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<PbCreateViewTask> for CreateViewTask {
|
||||
|
||||
@@ -25,11 +25,11 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
use snafu::OptionExt;
|
||||
use store_api::storage::{RegionId, RegionNumber};
|
||||
use strum::AsRefStr;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::RegionDistribution;
|
||||
use crate::peer::Peer;
|
||||
use crate::table_name::TableName;
|
||||
use crate::DatanodeId;
|
||||
|
||||
pub fn region_distribution(region_routes: &[RegionRoute]) -> RegionDistribution {
|
||||
|
||||
@@ -16,8 +16,7 @@ use std::fmt::{Display, Formatter};
|
||||
|
||||
use api::v1::TableName as PbTableName;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::table_reference::TableReference;
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
#[derive(Debug, Clone, Hash, Eq, PartialEq, Deserialize, Serialize)]
|
||||
pub struct TableName {
|
||||
@@ -4,16 +4,12 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[features]
|
||||
testing = []
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
api.workspace = true
|
||||
async-trait.workspace = true
|
||||
bytes.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-recordbatch.workspace = true
|
||||
|
||||
@@ -206,13 +206,6 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to decode logical plan: {source}"))]
|
||||
DecodePlan {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to do table mutation"))]
|
||||
TableMutation {
|
||||
source: BoxedError,
|
||||
@@ -289,12 +282,11 @@ impl ErrorExt for Error {
|
||||
| Error::InvalidFuncArgs { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Error::ConvertDfRecordBatchStream { source, .. } => source.status_code(),
|
||||
|
||||
Error::DecodePlan { source, .. }
|
||||
| Error::Execute { source, .. }
|
||||
| Error::ExecutePhysicalPlan { source, .. }
|
||||
| Error::ProcedureService { source, .. }
|
||||
| Error::TableMutation { source, .. } => source.status_code(),
|
||||
Error::ExecutePhysicalPlan { source, .. } => source.status_code(),
|
||||
Error::Execute { source, .. } => source.status_code(),
|
||||
Error::ProcedureService { source, .. } | Error::TableMutation { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::PermissionDenied { .. } => StatusCode::PermissionDenied,
|
||||
}
|
||||
|
||||
@@ -18,8 +18,7 @@ mod function;
|
||||
pub mod logical_plan;
|
||||
pub mod prelude;
|
||||
mod signature;
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub mod test_util;
|
||||
|
||||
use std::fmt::{Debug, Display, Formatter};
|
||||
use std::sync::Arc;
|
||||
|
||||
|
||||
@@ -19,15 +19,12 @@ mod udf;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use datafusion::catalog::CatalogProviderList;
|
||||
use datafusion::logical_expr::LogicalPlan;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
pub use expr::build_filter_from_timestamp;
|
||||
|
||||
pub use self::accumulator::{Accumulator, AggregateFunctionCreator, AggregateFunctionCreatorRef};
|
||||
pub use self::udaf::AggregateFunction;
|
||||
pub use self::udf::ScalarUdf;
|
||||
use crate::error::Result;
|
||||
use crate::function::{ReturnTypeFunction, ScalarFunctionImplementation};
|
||||
use crate::logical_plan::accumulator::*;
|
||||
use crate::signature::{Signature, Volatility};
|
||||
@@ -71,25 +68,6 @@ pub fn create_aggregate_function(
|
||||
)
|
||||
}
|
||||
|
||||
/// The datafusion `[LogicalPlan]` decoder.
|
||||
#[async_trait::async_trait]
|
||||
pub trait SubstraitPlanDecoder {
|
||||
/// Decode the [`LogicalPlan`] from bytes with the [`CatalogProviderList`].
|
||||
/// When `optimize` is true, it will do the optimization for decoded plan.
|
||||
///
|
||||
/// TODO(dennis): It's not a good design for an API to do many things.
|
||||
/// The `optimize` was introduced because of `query` and `catalog` cyclic dependency issue
|
||||
/// I am happy to refactor it if we have a better solution.
|
||||
async fn decode(
|
||||
&self,
|
||||
message: bytes::Bytes,
|
||||
catalog_list: Arc<dyn CatalogProviderList>,
|
||||
optimize: bool,
|
||||
) -> Result<LogicalPlan>;
|
||||
}
|
||||
|
||||
pub type SubstraitPlanDecoderRef = Arc<dyn SubstraitPlanDecoder + Send + Sync>;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -10,15 +10,19 @@ workspace = true
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
bytes.workspace = true
|
||||
catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-function.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
datafusion.workspace = true
|
||||
datafusion-common.workspace = true
|
||||
datafusion-expr.workspace = true
|
||||
datafusion-substrait.workspace = true
|
||||
datatypes.workspace = true
|
||||
promql.workspace = true
|
||||
prost.workspace = true
|
||||
session.workspace = true
|
||||
snafu.workspace = true
|
||||
|
||||
[dependencies.substrait_proto]
|
||||
|
||||
@@ -16,19 +16,26 @@ use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use bytes::{Buf, Bytes, BytesMut};
|
||||
use common_function::function_registry::FUNCTION_REGISTRY;
|
||||
use common_function::scalars::udf::create_udf;
|
||||
use datafusion::catalog::CatalogProviderList;
|
||||
use datafusion::execution::context::SessionState;
|
||||
use datafusion::execution::runtime_env::RuntimeEnv;
|
||||
use datafusion::execution::FunctionRegistry;
|
||||
use datafusion::prelude::{SessionConfig, SessionContext};
|
||||
use datafusion_expr::LogicalPlan;
|
||||
use datafusion_substrait::logical_plan::consumer::from_substrait_plan;
|
||||
use datafusion_substrait::logical_plan::producer::to_substrait_plan;
|
||||
use datafusion_substrait::substrait::proto::Plan;
|
||||
use prost::Message;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{DecodeDfPlanSnafu, DecodeRelSnafu, EncodeDfPlanSnafu, EncodeRelSnafu, Error};
|
||||
use crate::{SerializerRegistry, SubstraitPlan};
|
||||
use crate::error::{
|
||||
DFInternalSnafu, DecodeDfPlanSnafu, DecodeRelSnafu, EncodeDfPlanSnafu, EncodeRelSnafu, Error,
|
||||
};
|
||||
use crate::extension_serializer::ExtensionSerializer;
|
||||
use crate::SubstraitPlan;
|
||||
|
||||
pub struct DFLogicalSubstraitConvertor;
|
||||
|
||||
@@ -42,8 +49,15 @@ impl SubstraitPlan for DFLogicalSubstraitConvertor {
|
||||
&self,
|
||||
message: B,
|
||||
catalog_list: Arc<dyn CatalogProviderList>,
|
||||
state: SessionState,
|
||||
mut state: SessionState,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Self::Plan, Self::Error> {
|
||||
// substrait decoder will look up the UDFs in SessionState, so we need to register them
|
||||
for func in FUNCTION_REGISTRY.functions() {
|
||||
let udf = Arc::new(create_udf(func, query_ctx.clone(), Default::default()).into());
|
||||
state.register_udf(udf).context(DFInternalSnafu)?;
|
||||
}
|
||||
|
||||
let mut context = SessionContext::new_with_state(state);
|
||||
context.register_catalog_list(catalog_list);
|
||||
let plan = Plan::decode(message).context(DecodeRelSnafu)?;
|
||||
@@ -53,13 +67,10 @@ impl SubstraitPlan for DFLogicalSubstraitConvertor {
|
||||
Ok(df_plan)
|
||||
}
|
||||
|
||||
fn encode(
|
||||
&self,
|
||||
plan: &Self::Plan,
|
||||
serializer: impl SerializerRegistry + 'static,
|
||||
) -> Result<Bytes, Self::Error> {
|
||||
fn encode(&self, plan: &Self::Plan) -> Result<Bytes, Self::Error> {
|
||||
let mut buf = BytesMut::new();
|
||||
let substrait_plan = self.to_sub_plan(plan, serializer)?;
|
||||
|
||||
let substrait_plan = self.to_sub_plan(plan)?;
|
||||
substrait_plan.encode(&mut buf).context(EncodeRelSnafu)?;
|
||||
|
||||
Ok(buf.freeze())
|
||||
@@ -67,14 +78,10 @@ impl SubstraitPlan for DFLogicalSubstraitConvertor {
|
||||
}
|
||||
|
||||
impl DFLogicalSubstraitConvertor {
|
||||
pub fn to_sub_plan(
|
||||
&self,
|
||||
plan: &LogicalPlan,
|
||||
serializer: impl SerializerRegistry + 'static,
|
||||
) -> Result<Box<Plan>, Error> {
|
||||
pub fn to_sub_plan(&self, plan: &LogicalPlan) -> Result<Box<Plan>, Error> {
|
||||
let session_state =
|
||||
SessionState::new_with_config_rt(SessionConfig::new(), Arc::new(RuntimeEnv::default()))
|
||||
.with_serializer_registry(Arc::new(serializer));
|
||||
.with_serializer_registry(Arc::new(ExtensionSerializer));
|
||||
let context = SessionContext::new_with_state(session_state);
|
||||
|
||||
to_substrait_plan(plan, &context).context(EncodeDfPlanSnafu)
|
||||
|
||||
@@ -18,6 +18,7 @@ use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datafusion::error::DataFusionError;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use prost::{DecodeError, EncodeError};
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
@@ -25,6 +26,34 @@ use snafu::{Location, Snafu};
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Unsupported physical plan: {}", name))]
|
||||
UnsupportedPlan {
|
||||
name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Unsupported expr: {}", name))]
|
||||
UnsupportedExpr {
|
||||
name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Unsupported concrete type: {:?}", ty))]
|
||||
UnsupportedConcreteType {
|
||||
ty: ConcreteDataType,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Unsupported substrait type: {}", ty))]
|
||||
UnsupportedSubstraitType {
|
||||
ty: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to decode substrait relation"))]
|
||||
DecodeRel {
|
||||
#[snafu(source)]
|
||||
@@ -41,6 +70,33 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Input plan is empty"))]
|
||||
EmptyPlan {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Input expression is empty"))]
|
||||
EmptyExpr {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Missing required field in protobuf, field: {}, plan: {}", field, plan))]
|
||||
MissingField {
|
||||
field: String,
|
||||
plan: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid parameters: {}", reason))]
|
||||
InvalidParameters {
|
||||
reason: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Internal error from DataFusion"))]
|
||||
DFInternal {
|
||||
#[snafu(source)]
|
||||
@@ -62,6 +118,35 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Schema from Substrait proto doesn't match with the schema in storage.
|
||||
Substrait schema: {:?}
|
||||
Storage schema: {:?}",
|
||||
substrait_schema,
|
||||
storage_schema
|
||||
))]
|
||||
SchemaNotMatch {
|
||||
substrait_schema: datafusion::arrow::datatypes::SchemaRef,
|
||||
storage_schema: datafusion::arrow::datatypes::SchemaRef,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to convert DataFusion schema"))]
|
||||
ConvertDfSchema {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Unable to resolve table: {table_name}, error: "))]
|
||||
ResolveTable {
|
||||
table_name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to encode DataFusion plan"))]
|
||||
EncodeDfPlan {
|
||||
#[snafu(source)]
|
||||
@@ -84,13 +169,24 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::UnknownPlan { .. } | Error::EncodeRel { .. } | Error::DecodeRel { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
Error::UnsupportedConcreteType { .. }
|
||||
| Error::UnsupportedPlan { .. }
|
||||
| Error::UnsupportedExpr { .. }
|
||||
| Error::UnsupportedSubstraitType { .. } => StatusCode::Unsupported,
|
||||
Error::UnknownPlan { .. }
|
||||
| Error::EncodeRel { .. }
|
||||
| Error::DecodeRel { .. }
|
||||
| Error::EmptyPlan { .. }
|
||||
| Error::EmptyExpr { .. }
|
||||
| Error::MissingField { .. }
|
||||
| Error::InvalidParameters { .. }
|
||||
| Error::SchemaNotMatch { .. } => StatusCode::InvalidArguments,
|
||||
Error::DFInternal { .. }
|
||||
| Error::Internal { .. }
|
||||
| Error::EncodeDfPlan { .. }
|
||||
| Error::DecodeDfPlan { .. } => StatusCode::Internal,
|
||||
Error::ConvertDfSchema { source, .. } => source.status_code(),
|
||||
Error::ResolveTable { source, .. } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -67,6 +67,7 @@ impl SerializerRegistry for ExtensionSerializer {
|
||||
name if name == EmptyMetric::name() => Err(DataFusionError::Substrait(
|
||||
"EmptyMetric should not be serialized".to_string(),
|
||||
)),
|
||||
"MergeScan" => Ok(vec![]),
|
||||
other => Err(DataFusionError::NotImplemented(format!(
|
||||
"Serizlize logical plan for {}",
|
||||
other
|
||||
|
||||
@@ -23,11 +23,11 @@ use async_trait::async_trait;
|
||||
use bytes::{Buf, Bytes};
|
||||
use datafusion::catalog::CatalogProviderList;
|
||||
use datafusion::execution::context::SessionState;
|
||||
pub use datafusion::execution::registry::SerializerRegistry;
|
||||
/// Re-export the Substrait module of datafusion,
|
||||
/// note this is a different version of the `substrait_proto` crate
|
||||
pub use datafusion_substrait::substrait as substrait_proto_df;
|
||||
pub use datafusion_substrait::{logical_plan as df_logical_plan, variation_const};
|
||||
use session::context::QueryContextRef;
|
||||
pub use substrait_proto;
|
||||
|
||||
pub use crate::df_substrait::DFLogicalSubstraitConvertor;
|
||||
@@ -42,11 +42,8 @@ pub trait SubstraitPlan {
|
||||
message: B,
|
||||
catalog_list: Arc<dyn CatalogProviderList>,
|
||||
state: SessionState,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Self::Plan, Self::Error>;
|
||||
|
||||
fn encode(
|
||||
&self,
|
||||
plan: &Self::Plan,
|
||||
serializer: impl SerializerRegistry + 'static,
|
||||
) -> Result<Bytes, Self::Error>;
|
||||
fn encode(&self, plan: &Self::Plan) -> Result<Bytes, Self::Error>;
|
||||
}
|
||||
|
||||
@@ -94,7 +94,7 @@ pub fn init_default_ut_logging() {
|
||||
env::var("UNITTEST_LOG_DIR").unwrap_or_else(|_| "/tmp/__unittest_logs".to_string());
|
||||
|
||||
let level = env::var("UNITTEST_LOG_LEVEL").unwrap_or_else(|_|
|
||||
"debug,hyper=warn,tower=warn,datafusion=warn,reqwest=warn,sqlparser=warn,h2=info,opendal=info,rskafka=info".to_string()
|
||||
"debug,hyper=warn,tower=warn,datafusion=warn,reqwest=warn,sqlparser=warn,h2=info,opendal=info".to_string()
|
||||
);
|
||||
let opts = LoggingOptions {
|
||||
dir: dir.clone(),
|
||||
|
||||
@@ -57,6 +57,7 @@ servers.workspace = true
|
||||
session.workspace = true
|
||||
snafu.workspace = true
|
||||
store-api.workspace = true
|
||||
substrait.workspace = true
|
||||
table.workspace = true
|
||||
tokio.workspace = true
|
||||
toml.workspace = true
|
||||
|
||||
@@ -64,18 +64,11 @@ pub enum Error {
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create plan decoder"))]
|
||||
NewPlanDecoder {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to decode logical plan"))]
|
||||
DecodeLogicalPlan {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_query::error::Error,
|
||||
source: substrait::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Incorrect internal state: {}", state))]
|
||||
@@ -395,9 +388,7 @@ impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
use Error::*;
|
||||
match self {
|
||||
NewPlanDecoder { source, .. } | ExecuteLogicalPlan { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
ExecuteLogicalPlan { source, .. } => source.status_code(),
|
||||
|
||||
BuildRegionRequests { source, .. } => source.status_code(),
|
||||
HandleHeartbeatResponse { source, .. } | GetMetadata { source, .. } => {
|
||||
|
||||
@@ -41,13 +41,19 @@ pub struct RegionServerEventSender(pub(crate) UnboundedSender<RegionServerEvent>
|
||||
impl RegionServerEventListener for RegionServerEventSender {
|
||||
fn on_region_registered(&self, region_id: RegionId) {
|
||||
if let Err(e) = self.0.send(RegionServerEvent::Registered(region_id)) {
|
||||
error!(e; "Failed to send registering region: {region_id} event");
|
||||
error!(
|
||||
"Failed to send registering region: {region_id} event, source: {}",
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn on_region_deregistered(&self, region_id: RegionId) {
|
||||
if let Err(e) = self.0.send(RegionServerEvent::Deregistered(region_id)) {
|
||||
error!(e; "Failed to send deregistering region: {region_id} event");
|
||||
error!(
|
||||
"Failed to send deregistering region: {region_id} event, source: {}",
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -213,7 +213,6 @@ impl HeartbeatTask {
|
||||
let epoch = self.region_alive_keeper.epoch();
|
||||
|
||||
self.region_alive_keeper.start(Some(event_receiver)).await?;
|
||||
let mut last_sent = Instant::now();
|
||||
|
||||
common_runtime::spawn_bg(async move {
|
||||
let sleep = tokio::time::sleep(Duration::from_millis(0));
|
||||
@@ -272,10 +271,6 @@ impl HeartbeatTask {
|
||||
}
|
||||
};
|
||||
if let Some(req) = req {
|
||||
metrics::LAST_SENT_HEARTBEAT_ELAPSED
|
||||
.set(last_sent.elapsed().as_millis() as i64);
|
||||
// Resets the timer.
|
||||
last_sent = Instant::now();
|
||||
debug!("Sending heartbeat request: {:?}", req);
|
||||
if let Err(e) = tx.send(req).await {
|
||||
error!(e; "Failed to send heartbeat to metasrv");
|
||||
|
||||
@@ -35,12 +35,6 @@ lazy_static! {
|
||||
"last received heartbeat lease elapsed",
|
||||
)
|
||||
.unwrap();
|
||||
/// The elapsed time since the last sent heartbeat.
|
||||
pub static ref LAST_SENT_HEARTBEAT_ELAPSED: IntGauge = register_int_gauge!(
|
||||
"greptime_last_sent_heartbeat_lease_elapsed",
|
||||
"last sent heartbeat lease elapsed",
|
||||
)
|
||||
.unwrap();
|
||||
pub static ref LEASE_EXPIRED_REGION: IntGaugeVec = register_int_gauge_vec!(
|
||||
"greptime_lease_expired_region",
|
||||
"lease expired region",
|
||||
|
||||
@@ -51,13 +51,13 @@ use store_api::metric_engine_consts::{
|
||||
use store_api::region_engine::{RegionEngineRef, RegionRole, SetReadonlyResponse};
|
||||
use store_api::region_request::{AffectedRows, RegionCloseRequest, RegionRequest};
|
||||
use store_api::storage::RegionId;
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
use tonic::{Request, Response, Result as TonicResult};
|
||||
|
||||
use crate::error::{
|
||||
self, BuildRegionRequestsSnafu, DecodeLogicalPlanSnafu, ExecuteLogicalPlanSnafu,
|
||||
FindLogicalRegionsSnafu, HandleRegionRequestSnafu, NewPlanDecoderSnafu,
|
||||
RegionEngineNotFoundSnafu, RegionNotFoundSnafu, Result, StopRegionEngineSnafu, UnexpectedSnafu,
|
||||
UnsupportedOutputSnafu,
|
||||
FindLogicalRegionsSnafu, HandleRegionRequestSnafu, RegionEngineNotFoundSnafu,
|
||||
RegionNotFoundSnafu, Result, StopRegionEngineSnafu, UnexpectedSnafu, UnsupportedOutputSnafu,
|
||||
};
|
||||
use crate::event_listener::RegionServerEventListenerRef;
|
||||
|
||||
@@ -189,7 +189,7 @@ impl RegionServer {
|
||||
|
||||
pub async fn region_disk_usage(&self, region_id: RegionId) -> Option<i64> {
|
||||
match self.inner.region_map.get(®ion_id) {
|
||||
Some(e) => e.region_disk_usage(region_id),
|
||||
Some(e) => e.region_disk_usage(region_id).await,
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
@@ -409,7 +409,9 @@ impl RegionServerInner {
|
||||
let engine = match region_change {
|
||||
RegionChange::Register(attribute) => match current_region_status {
|
||||
Some(status) => match status.clone() {
|
||||
RegionEngineWithStatus::Registering(engine) => engine,
|
||||
RegionEngineWithStatus::Registering(_) => {
|
||||
return Ok(CurrentEngine::EarlyReturn(0))
|
||||
}
|
||||
RegionEngineWithStatus::Deregistering(_) => {
|
||||
return error::RegionBusySnafu { region_id }.fail()
|
||||
}
|
||||
@@ -653,13 +655,14 @@ impl RegionServerInner {
|
||||
|
||||
let catalog_list = Arc::new(DummyCatalogList::with_table_provider(table_provider));
|
||||
let query_engine_ctx = self.query_engine.engine_context(ctx.clone());
|
||||
let plan_decoder = query_engine_ctx
|
||||
.new_plan_decoder()
|
||||
.context(NewPlanDecoderSnafu)?;
|
||||
|
||||
// decode substrait plan to logical plan and execute it
|
||||
let logical_plan = plan_decoder
|
||||
.decode(Bytes::from(plan), catalog_list, false)
|
||||
let logical_plan = DFLogicalSubstraitConvertor
|
||||
.decode(
|
||||
Bytes::from(plan),
|
||||
catalog_list,
|
||||
query_engine_ctx.state().clone(),
|
||||
ctx.clone(),
|
||||
)
|
||||
.await
|
||||
.context(DecodeLogicalPlanSnafu)?;
|
||||
|
||||
@@ -778,32 +781,34 @@ mod tests {
|
||||
let mut mock_region_server = mock_region_server();
|
||||
let (engine, _receiver) = MockRegionEngine::new(MITO_ENGINE_NAME);
|
||||
let engine_name = engine.name();
|
||||
|
||||
mock_region_server.register_engine(engine.clone());
|
||||
|
||||
let region_id = RegionId::new(1, 1);
|
||||
let builder = CreateRequestBuilder::new();
|
||||
let create_req = builder.build();
|
||||
|
||||
// Tries to create/open a registering region.
|
||||
mock_region_server.inner.region_map.insert(
|
||||
region_id,
|
||||
RegionEngineWithStatus::Registering(engine.clone()),
|
||||
);
|
||||
|
||||
let response = mock_region_server
|
||||
.handle_request(region_id, RegionRequest::Create(create_req))
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(response.affected_rows, 0);
|
||||
|
||||
let status = mock_region_server
|
||||
.inner
|
||||
.region_map
|
||||
.get(®ion_id)
|
||||
.unwrap()
|
||||
.clone();
|
||||
assert!(matches!(status, RegionEngineWithStatus::Ready(_)));
|
||||
|
||||
mock_region_server.inner.region_map.insert(
|
||||
region_id,
|
||||
RegionEngineWithStatus::Registering(engine.clone()),
|
||||
);
|
||||
assert!(matches!(status, RegionEngineWithStatus::Registering(_)));
|
||||
|
||||
let response = mock_region_server
|
||||
.handle_request(
|
||||
region_id,
|
||||
@@ -817,13 +822,14 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(response.affected_rows, 0);
|
||||
|
||||
let status = mock_region_server
|
||||
.inner
|
||||
.region_map
|
||||
.get(®ion_id)
|
||||
.unwrap()
|
||||
.clone();
|
||||
assert!(matches!(status, RegionEngineWithStatus::Ready(_)));
|
||||
assert!(matches!(status, RegionEngineWithStatus::Registering(_)));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -1014,7 +1020,7 @@ mod tests {
|
||||
region_change: RegionChange::Register(RegionAttribute::Mito),
|
||||
assert: Box::new(|result| {
|
||||
let current_engine = result.unwrap();
|
||||
assert_matches!(current_engine, CurrentEngine::Engine(_));
|
||||
assert_matches!(current_engine, CurrentEngine::EarlyReturn(_));
|
||||
}),
|
||||
},
|
||||
CurrentEngineTest {
|
||||
|
||||
@@ -200,7 +200,7 @@ impl RegionEngine for MockRegionEngine {
unimplemented!()
}

fn region_disk_usage(&self, _region_id: RegionId) -> Option<i64> {
async fn region_disk_usage(&self, _region_id: RegionId) -> Option<i64> {
unimplemented!()
}

@@ -107,7 +107,7 @@ impl RegionEngine for FileRegionEngine {
self.inner.stop().await.map_err(BoxedError::new)
}

fn region_disk_usage(&self, _: RegionId) -> Option<i64> {
async fn region_disk_usage(&self, _: RegionId) -> Option<i64> {
None
}

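Both engine implementations above switch `region_disk_usage` from a synchronous method to an `async fn`, which is why `RegionServer::region_disk_usage` earlier in this diff now has to `.await` the call. A minimal sketch of the shape of that change, assuming Rust 1.75+ native async-fn-in-trait and the `futures` crate for a blocking executor (the real trait may well go through `async_trait` instead):

```rust
// Sketch only: not the real RegionEngine trait.
type RegionId = u64;

trait DiskUsage {
    // Native async fn in trait requires Rust 1.75+.
    async fn region_disk_usage(&self, region_id: RegionId) -> Option<i64>;
}

struct FileEngine;

impl DiskUsage for FileEngine {
    async fn region_disk_usage(&self, _region_id: RegionId) -> Option<i64> {
        // File engine regions report no disk usage here.
        None
    }
}

async fn report(engine: &impl DiskUsage, region_id: RegionId) -> i64 {
    // Callers now have to await the engine method.
    engine.region_disk_usage(region_id).await.unwrap_or(0)
}

fn main() {
    // Drive the future on a trivial executor; any async runtime works.
    let usage = futures::executor::block_on(report(&FileEngine, 1));
    assert_eq!(usage, 0);
}
```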
@@ -229,9 +229,8 @@ impl EngineInner {
|
||||
let res = FileRegion::create(region_id, request, &self.object_store).await;
|
||||
let region = res.inspect_err(|err| {
|
||||
error!(
|
||||
err;
|
||||
"Failed to create region, region_id: {}",
|
||||
region_id
|
||||
"Failed to create region, region_id: {}, err: {}",
|
||||
region_id, err
|
||||
);
|
||||
})?;
|
||||
self.regions.write().unwrap().insert(region_id, region);
|
||||
@@ -260,9 +259,8 @@ impl EngineInner {
|
||||
let res = FileRegion::open(region_id, request, &self.object_store).await;
|
||||
let region = res.inspect_err(|err| {
|
||||
error!(
|
||||
err;
|
||||
"Failed to open region, region_id: {}",
|
||||
region_id
|
||||
"Failed to open region, region_id: {}, err: {}",
|
||||
region_id, err
|
||||
);
|
||||
})?;
|
||||
self.regions.write().unwrap().insert(region_id, region);
|
||||
@@ -304,9 +302,8 @@ impl EngineInner {
|
||||
let res = FileRegion::drop(®ion, &self.object_store).await;
|
||||
res.inspect_err(|err| {
|
||||
error!(
|
||||
err;
|
||||
"Failed to drop region, region_id: {}",
|
||||
region_id
|
||||
"Failed to drop region, region_id: {}, err: {}",
|
||||
region_id, err
|
||||
);
|
||||
})?;
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ use common_base::Plugins;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_frontend::handler::FrontendInvoker;
|
||||
use common_meta::key::TableMetadataManagerRef;
|
||||
use common_query::prelude::GREPTIME_TIMESTAMP;
|
||||
use common_runtime::JoinHandle;
|
||||
use common_telemetry::{debug, info};
|
||||
use datatypes::schema::ColumnSchema;
|
||||
@@ -35,12 +36,12 @@ use itertools::Itertools;
|
||||
use query::{QueryEngine, QueryEngineFactory};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use session::context::QueryContext;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ConcreteDataType, RegionId};
|
||||
use table::metadata::TableId;
|
||||
use tokio::sync::{oneshot, watch, Mutex, RwLock};
|
||||
|
||||
use crate::adapter::error::{ExternalSnafu, InternalSnafu, TableNotFoundSnafu, UnexpectedSnafu};
|
||||
use crate::adapter::error::{ExternalSnafu, TableNotFoundSnafu, UnexpectedSnafu};
|
||||
pub(crate) use crate::adapter::node_context::FlownodeContext;
|
||||
use crate::adapter::table_source::TableSource;
|
||||
use crate::adapter::util::column_schemas_to_proto;
|
||||
@@ -66,11 +67,6 @@ use error::Error;
|
||||
|
||||
pub const PER_REQ_MAX_ROW_CNT: usize = 8192;
|
||||
|
||||
// TODO: replace this with `GREPTIME_TIMESTAMP` before v0.9
|
||||
pub const AUTO_CREATED_PLACEHOLDER_TS_COL: &str = "__ts_placeholder";
|
||||
|
||||
pub const UPDATE_AT_TS_COL: &str = "update_at";
|
||||
|
||||
// TODO: refactor common types for flow to a separate module
|
||||
/// FlowId is a unique identifier for a flow task
|
||||
pub type FlowId = u64;
|
||||
@@ -159,7 +155,7 @@ pub struct FlownodeManager {
|
||||
table_info_source: TableSource,
|
||||
frontend_invoker: RwLock<Option<Box<dyn FrontendInvoker + Send + Sync>>>,
|
||||
/// contains mapping from table name to global id, and table schema
|
||||
node_context: RwLock<FlownodeContext>,
|
||||
node_context: Mutex<FlownodeContext>,
|
||||
flow_err_collectors: RwLock<BTreeMap<FlowId, ErrCollector>>,
|
||||
src_send_buf_lens: RwLock<BTreeMap<TableId, watch::Receiver<usize>>>,
|
||||
tick_manager: FlowTickManager,
|
||||
@@ -194,7 +190,7 @@ impl FlownodeManager {
|
||||
query_engine,
|
||||
table_info_source: srv_map,
|
||||
frontend_invoker: RwLock::new(None),
|
||||
node_context: RwLock::new(node_context),
|
||||
node_context: Mutex::new(node_context),
|
||||
flow_err_collectors: Default::default(),
|
||||
src_send_buf_lens: Default::default(),
|
||||
tick_manager,
|
||||
@@ -284,21 +280,15 @@ impl FlownodeManager {
|
||||
.map(|i| meta.schema.column_schemas[i].name.clone())
|
||||
.collect_vec();
|
||||
let schema = meta.schema.column_schemas;
|
||||
// check if the last column is the auto created timestamp column, hence the table is auto created from
|
||||
// flow's plan type
|
||||
let is_auto_create = {
|
||||
let correct_name = schema
|
||||
.last()
|
||||
.map(|s| s.name == AUTO_CREATED_PLACEHOLDER_TS_COL)
|
||||
.unwrap_or(false);
|
||||
let correct_time_index = meta.schema.timestamp_index == Some(schema.len() - 1);
|
||||
correct_name && correct_time_index
|
||||
};
|
||||
let is_auto_create = schema
|
||||
.last()
|
||||
.map(|s| s.name == GREPTIME_TIMESTAMP)
|
||||
.unwrap_or(false);
|
||||
(primary_keys, schema, is_auto_create)
|
||||
} else {
|
||||
// TODO(discord9): condiser remove buggy auto create by schema
|
||||
|
||||
let node_ctx = self.node_context.read().await;
|
||||
let node_ctx = self.node_context.lock().await;
|
||||
let gid: GlobalId = node_ctx
|
||||
.table_repr
|
||||
.get_by_name(&table_name)
|
||||
@@ -324,13 +314,13 @@ impl FlownodeManager {
|
||||
})
|
||||
.unwrap_or_default();
|
||||
let update_at = ColumnSchema::new(
|
||||
UPDATE_AT_TS_COL,
|
||||
"update_at",
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
true,
|
||||
);
|
||||
// TODO(discord9): bugged so we can't infer time index from flow plan, so we have to manually set one
|
||||
let ts_col = ColumnSchema::new(
|
||||
AUTO_CREATED_PLACEHOLDER_TS_COL,
|
||||
GREPTIME_TIMESTAMP,
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
true,
|
||||
)
|
||||
@@ -358,7 +348,7 @@ impl FlownodeManager {
|
||||
|
||||
(primary_keys, with_ts, true)
|
||||
};
|
||||
let schema_len = schema.len();
|
||||
|
||||
let proto_schema = column_schemas_to_proto(schema, &primary_keys)?;
|
||||
|
||||
debug!(
|
||||
@@ -367,7 +357,16 @@ impl FlownodeManager {
|
||||
table_name.join("."),
|
||||
reqs
|
||||
);
|
||||
let now = self.tick_manager.tick();
|
||||
let now = SystemTime::now();
|
||||
let now = now
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.map(|s| s.as_millis() as repr::Timestamp)
|
||||
.unwrap_or_else(|_| {
|
||||
-(SystemTime::UNIX_EPOCH
|
||||
.duration_since(now)
|
||||
.unwrap()
|
||||
.as_millis() as repr::Timestamp)
|
||||
});
|
||||
for req in reqs {
|
||||
match req {
|
||||
DiffRequest::Insert(insert) => {
|
||||
@@ -380,23 +379,13 @@ impl FlownodeManager {
|
||||
))]);
|
||||
// ts col, if auto create
|
||||
if is_auto_create {
|
||||
ensure!(
|
||||
row.len() == schema_len - 1,
|
||||
InternalSnafu {
|
||||
reason: format!(
|
||||
"Row len mismatch, expect {} got {}",
|
||||
schema_len - 1,
|
||||
row.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
row.extend([Value::from(
|
||||
common_time::Timestamp::new_millisecond(0),
|
||||
)]);
|
||||
}
|
||||
Ok(row.into())
|
||||
row.into()
|
||||
})
|
||||
.collect::<Result<Vec<_>, Error>>()?;
|
||||
.collect::<Vec<_>>();
|
||||
let table_name = table_name.last().unwrap().clone();
|
||||
let req = RowInsertRequest {
|
||||
table_name,
|
||||
@@ -462,7 +451,7 @@ impl FlownodeManager {
|
||||
let mut output = BTreeMap::new();
|
||||
for (name, sink_recv) in self
|
||||
.node_context
|
||||
.write()
|
||||
.lock()
|
||||
.await
|
||||
.sink_receiver
|
||||
.iter_mut()
|
||||
@@ -510,12 +499,9 @@ impl FlownodeManager {
|
||||
debug!("Starting to run");
|
||||
loop {
|
||||
// TODO(discord9): only run when new inputs arrive or scheduled to
|
||||
debug!("call run_available in run every second");
|
||||
self.run_available(true).await.unwrap();
|
||||
debug!("call send_writeback_requests in run every second");
|
||||
self.run_available().await.unwrap();
|
||||
// TODO(discord9): error handling
|
||||
self.send_writeback_requests().await.unwrap();
|
||||
debug!("call log_all_errors in run every second");
|
||||
self.log_all_errors().await;
|
||||
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
|
||||
}
|
||||
@@ -524,33 +510,17 @@ impl FlownodeManager {
|
||||
/// Run all available subgraphs in the flow node
|
||||
/// This will try to run all dataflows in this node
|
||||
///
|
||||
/// set `blocking` to true to wait until the lock is acquired,
|
||||
/// and to false to return immediately if the lock is not acquired
|
||||
/// However, this is not blocking and can sometimes return while the actual computation is still running in the worker thread
|
||||
/// TODO(discord9): add flag for subgraph that have input since last run
|
||||
pub async fn run_available(&self, blocking: bool) -> Result<(), Error> {
|
||||
pub async fn run_available(&self) -> Result<(), Error> {
|
||||
loop {
|
||||
let now = self.tick_manager.tick();
|
||||
for worker in self.worker_handles.iter() {
|
||||
// TODO(discord9): consider how to handle error in individual worker
|
||||
if blocking {
|
||||
worker.lock().await.run_available(now).await?;
|
||||
} else if let Ok(worker) = worker.try_lock() {
|
||||
worker.run_available(now).await?;
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
worker.lock().await.run_available(now).await.unwrap();
|
||||
}
|
||||
// first check how many inputs were sent
|
||||
let (flush_res, buf_len) = if blocking {
|
||||
let ctx = self.node_context.read().await;
|
||||
(ctx.flush_all_sender().await, ctx.get_send_buf_size().await)
|
||||
} else {
|
||||
match self.node_context.try_read() {
|
||||
Ok(ctx) => (ctx.flush_all_sender().await, ctx.get_send_buf_size().await),
|
||||
Err(_) => return Ok(()),
|
||||
}
|
||||
};
|
||||
match flush_res {
|
||||
match self.node_context.lock().await.flush_all_sender() {
|
||||
Ok(_) => (),
|
||||
Err(err) => {
|
||||
common_telemetry::error!("Flush send buf errors: {:?}", err);
|
||||
@@ -558,6 +528,7 @@ impl FlownodeManager {
|
||||
}
|
||||
};
|
||||
// if there is nothing in the send buf then break
|
||||
let buf_len = self.node_context.lock().await.get_send_buf_size();
|
||||
if buf_len == 0 {
|
||||
break;
|
||||
} else {
|
||||
@@ -580,9 +551,9 @@ impl FlownodeManager {
|
||||
rows.len()
|
||||
);
|
||||
let table_id = region_id.table_id();
|
||||
self.node_context.read().await.send(table_id, rows).await?;
|
||||
self.node_context.lock().await.send(table_id, rows)?;
|
||||
// TODO(discord9): put it in a background task?
|
||||
// self.run_available(false).await?;
|
||||
self.run_available().await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -628,7 +599,7 @@ impl FlownodeManager {
|
||||
}
|
||||
}
|
||||
|
||||
let mut node_ctx = self.node_context.write().await;
|
||||
let mut node_ctx = self.node_context.lock().await;
|
||||
// assign global id to source and sink table
|
||||
for source in source_table_ids {
|
||||
node_ctx
|
||||
@@ -695,9 +666,7 @@ impl FlownodeManager {
|
||||
/// TSO coord mess
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct FlowTickManager {
|
||||
/// The starting instant of the flow, used with `start_timestamp` to calculate the current timestamp
|
||||
start: Instant,
|
||||
/// The timestamp when the flow started
|
||||
start_timestamp: repr::Timestamp,
|
||||
}
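The two fields above are enough to derive a monotonic timestamp without re-reading the system clock on every tick: the wall clock is sampled once at start, and each tick adds the elapsed monotonic time. The diff does not show the actual `tick()` body, so the following is only a minimal standalone sketch of that idea; the type alias and the millisecond unit are assumptions.

```rust
use std::time::Instant;

/// Stand-in for `repr::Timestamp` (assumed to be milliseconds).
type Timestamp = i64;

struct TickManager {
    /// Monotonic anchor taken when the flow starts.
    start: Instant,
    /// Wall-clock timestamp sampled once at start.
    start_timestamp: Timestamp,
}

impl TickManager {
    fn new(start_timestamp: Timestamp) -> Self {
        Self { start: Instant::now(), start_timestamp }
    }

    /// Current timestamp = start wall clock + elapsed monotonic time, so
    /// successive ticks never go backwards even if the system clock does.
    fn tick(&self) -> Timestamp {
        self.start_timestamp + self.start.elapsed().as_millis() as Timestamp
    }
}
```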
@@ -116,7 +116,7 @@ impl Flownode for FlownodeManager {
|
||||
let now = self.tick_manager.tick();
|
||||
|
||||
let fetch_order = {
|
||||
let ctx = self.node_context.read().await;
|
||||
let ctx = self.node_context.lock().await;
|
||||
let table_col_names = ctx
|
||||
.table_repr
|
||||
.get_by_table_id(&table_id)
|
||||
|
||||
@@ -21,7 +21,7 @@ use common_telemetry::debug;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::metadata::TableId;
|
||||
use tokio::sync::{broadcast, mpsc, RwLock};
|
||||
use tokio::sync::{broadcast, mpsc};
|
||||
|
||||
use crate::adapter::error::{Error, EvalSnafu, TableNotFoundSnafu};
|
||||
use crate::adapter::{FlowId, TableName, TableSource};
|
||||
@@ -65,20 +65,18 @@ pub struct FlownodeContext {
|
||||
#[derive(Debug)]
|
||||
pub struct SourceSender {
|
||||
sender: broadcast::Sender<DiffRow>,
|
||||
send_buf: RwLock<VecDeque<DiffRow>>,
|
||||
send_buf: VecDeque<DiffRow>,
|
||||
}
|
||||
|
||||
impl Default for SourceSender {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
// TODO(discord9): find a better way than increasing this to prevent lagging and hence missing input data
|
||||
sender: broadcast::Sender::new(BROADCAST_CAP * 2),
|
||||
sender: broadcast::Sender::new(BROADCAST_CAP),
|
||||
send_buf: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: make all send operations immutable
|
||||
impl SourceSender {
|
||||
pub fn get_receiver(&self) -> broadcast::Receiver<DiffRow> {
|
||||
self.sender.subscribe()
|
||||
@@ -86,16 +84,15 @@ impl SourceSender {
|
||||
|
||||
/// send as many rows as possible from the send buf
|
||||
/// until the send buf is empty or the broadcast channel is full
|
||||
pub async fn try_send_all(&self) -> Result<usize, Error> {
|
||||
pub fn try_send_all(&mut self) -> Result<usize, Error> {
|
||||
let mut row_cnt = 0;
|
||||
loop {
|
||||
let mut send_buf = self.send_buf.write().await;
|
||||
// if the inner sender channel is full or the send buf is empty, there
|
||||
// is nothing to do for now, just break
|
||||
if self.sender.len() >= BROADCAST_CAP || send_buf.is_empty() {
|
||||
if self.sender.len() >= BROADCAST_CAP || self.send_buf.is_empty() {
|
||||
break;
|
||||
}
|
||||
if let Some(row) = send_buf.pop_front() {
|
||||
if let Some(row) = self.send_buf.pop_front() {
|
||||
self.sender
|
||||
.send(row)
|
||||
.map_err(|err| {
|
||||
@@ -110,20 +107,17 @@ impl SourceSender {
|
||||
}
|
||||
if row_cnt > 0 {
|
||||
debug!("Send {} rows", row_cnt);
|
||||
debug!(
|
||||
"Remaining Send buf.len() = {}",
|
||||
self.send_buf.read().await.len()
|
||||
);
|
||||
debug!("Send buf len = {}", self.send_buf.len());
|
||||
}
|
||||
|
||||
Ok(row_cnt)
|
||||
}
|
||||
|
||||
/// return the number of rows actually sent (including what's in the buffer)
|
||||
pub async fn send_rows(&self, rows: Vec<DiffRow>) -> Result<usize, Error> {
|
||||
self.send_buf.write().await.extend(rows);
|
||||
pub fn send_rows(&mut self, rows: Vec<DiffRow>) -> Result<usize, Error> {
|
||||
self.send_buf.extend(rows);
|
||||
|
||||
let row_cnt = self.try_send_all().await?;
|
||||
let row_cnt = self.try_send_all()?;
|
||||
|
||||
Ok(row_cnt)
|
||||
}
|
||||
@@ -133,35 +127,30 @@ impl FlownodeContext {
|
||||
/// return the number of rows actually sent (including what's in the buffer)
|
||||
///
|
||||
/// TODO(discord9): make this concurrent
|
||||
pub async fn send(&self, table_id: TableId, rows: Vec<DiffRow>) -> Result<usize, Error> {
|
||||
pub fn send(&mut self, table_id: TableId, rows: Vec<DiffRow>) -> Result<usize, Error> {
|
||||
let sender = self
|
||||
.source_sender
|
||||
.get(&table_id)
|
||||
.get_mut(&table_id)
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
name: table_id.to_string(),
|
||||
})?;
|
||||
// debug!("FlownodeContext::send: trying to send {} rows", rows.len());
|
||||
sender.send_rows(rows).await
|
||||
sender.send_rows(rows)
|
||||
}
|
||||
|
||||
/// flush all senders' bufs
|
||||
///
|
||||
/// return the number of rows sent
|
||||
pub async fn flush_all_sender(&self) -> Result<usize, Error> {
|
||||
let mut sum = 0;
|
||||
for sender in self.source_sender.values() {
|
||||
sender.try_send_all().await.inspect(|x| sum += x)?;
|
||||
}
|
||||
Ok(sum)
|
||||
pub fn flush_all_sender(&mut self) -> Result<usize, Error> {
|
||||
self.source_sender
|
||||
.iter_mut()
|
||||
.map(|(_table_id, src_sender)| src_sender.try_send_all())
|
||||
.try_fold(0, |acc, x| x.map(|x| x + acc))
|
||||
}
|
||||
|
||||
/// Return the total number of rows across all send bufs
|
||||
pub async fn get_send_buf_size(&self) -> usize {
|
||||
let mut sum = 0;
|
||||
for sender in self.source_sender.values() {
|
||||
sum += sender.send_buf.read().await.len();
|
||||
}
|
||||
sum
|
||||
pub fn get_send_buf_size(&self) -> usize {
|
||||
self.source_sender.values().map(|v| v.send_buf.len()).sum()
|
||||
}
|
||||
}
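For reference, the buffering pattern that `SourceSender::try_send_all` and `FlownodeContext::send` move to in this hunk can be reduced to the following standalone sketch: rows are first staged in an owned `VecDeque` and only drained into the bounded broadcast channel while it still has capacity. Types, the capacity constant, and the error handling below are simplified assumptions, not the crate's code.

```rust
use std::collections::VecDeque;

use tokio::sync::broadcast;

const CAP: usize = 1024; // illustrative stand-in for BROADCAST_CAP

struct BufferedSender {
    sender: broadcast::Sender<u64>,
    send_buf: VecDeque<u64>,
}

impl BufferedSender {
    /// Drain the staging buffer into the channel until either is exhausted.
    fn try_send_all(&mut self) -> usize {
        let mut sent = 0;
        while self.sender.len() < CAP {
            let Some(row) = self.send_buf.pop_front() else { break };
            // `send` only errors when there are no receivers; a sketch can
            // simply stop draining in that case.
            if self.sender.send(row).is_err() {
                break;
            }
            sent += 1;
        }
        sent
    }

    /// Stage the rows first, then flush whatever currently fits.
    fn send_rows(&mut self, rows: Vec<u64>) -> usize {
        self.send_buf.extend(rows);
        self.try_send_all()
    }
}
```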
@@ -285,8 +285,8 @@ impl<'s> Worker<'s> {
|
||||
Ok(Some((id, resp))) => {
|
||||
if let Err(err) = self.itc_server.blocking_lock().resp(id, resp) {
|
||||
common_telemetry::error!(
|
||||
err;
|
||||
"Worker's itc server has been closed unexpectedly, shutting down worker"
|
||||
"Worker's itc server has been closed unexpectedly, shutting down worker: {}",
|
||||
err
|
||||
);
|
||||
break;
|
||||
};
|
||||
|
||||
@@ -406,15 +406,10 @@ fn reduce_accum_subgraph(
|
||||
err_collector.run(|| {
|
||||
if let Some(expired) = expire_man.get_expire_duration(now, &key)? {
|
||||
is_expired = true;
|
||||
// expired data is ignored in computation, and a simple warning is logged
|
||||
common_telemetry::warn!(
|
||||
"Data already expired: {}",
|
||||
DataAlreadyExpiredSnafu {
|
||||
expired_by: expired,
|
||||
}
|
||||
.build()
|
||||
);
|
||||
Ok(())
|
||||
DataAlreadyExpiredSnafu {
|
||||
expired_by: expired,
|
||||
}
|
||||
.fail()
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -20,14 +20,12 @@ use common_telemetry::{debug, info};
|
||||
use hydroflow::scheduled::graph_ext::GraphExt;
|
||||
use itertools::Itertools;
|
||||
use snafu::OptionExt;
|
||||
use tokio::sync::broadcast::error::TryRecvError;
|
||||
use tokio::sync::{broadcast, mpsc};
|
||||
|
||||
use crate::adapter::error::{Error, PlanSnafu};
|
||||
use crate::compute::render::Context;
|
||||
use crate::compute::types::{Arranged, Collection, CollectionBundle, Toff};
|
||||
use crate::expr::error::InternalSnafu;
|
||||
use crate::expr::{EvalError, GlobalId};
|
||||
use crate::expr::GlobalId;
|
||||
use crate::repr::{DiffRow, Row, BROADCAST_CAP};
|
||||
|
||||
#[allow(clippy::mutable_key_type)]
|
||||
@@ -67,33 +65,11 @@ impl<'referred, 'df> Context<'referred, 'df> {
|
||||
let mut to_send = Vec::new();
|
||||
let mut to_arrange = Vec::new();
|
||||
// TODO(discord9): handling tokio broadcast error
|
||||
loop {
|
||||
match src_recv.try_recv() {
|
||||
Ok((r, t, d)) => {
|
||||
if t <= now {
|
||||
to_send.push((r, t, d));
|
||||
} else {
|
||||
to_arrange.push(((r, Row::empty()), t, d));
|
||||
}
|
||||
}
|
||||
Err(TryRecvError::Empty) => {
|
||||
break;
|
||||
}
|
||||
Err(TryRecvError::Lagged(lag_offset)) => {
|
||||
common_telemetry::error!("Flow missing {} rows behind", lag_offset);
|
||||
break;
|
||||
}
|
||||
Err(err) => {
|
||||
err_collector.run(|| -> Result<(), EvalError> {
|
||||
InternalSnafu {
|
||||
reason: format!(
|
||||
"Error receiving from broadcast channel: {}",
|
||||
err
|
||||
),
|
||||
}
|
||||
.fail()
|
||||
});
|
||||
}
|
||||
while let Ok((r, t, d)) = src_recv.try_recv() {
|
||||
if t <= now {
|
||||
to_send.push((r, t, d));
|
||||
} else {
|
||||
to_arrange.push(((r, Row::empty()), t, d));
|
||||
}
|
||||
}
|
||||
let all = prev_avail.chain(to_send).collect_vec();
|
||||
|
||||
@@ -76,13 +76,6 @@ impl UnmaterializableFunc {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_valid_func_name(name: &str) -> bool {
|
||||
matches!(
|
||||
name.to_lowercase().as_str(),
|
||||
"now" | "current_schema" | "tumble"
|
||||
)
|
||||
}
|
||||
|
||||
/// Create a UnmaterializableFunc from a string of the function name
|
||||
pub fn from_str_args(name: &str, args: Vec<TypedExpr>) -> Result<Self, Error> {
|
||||
match name.to_lowercase().as_str() {
|
||||
@@ -190,13 +183,6 @@ impl UnaryFunc {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_valid_func_name(name: &str) -> bool {
|
||||
matches!(
|
||||
name.to_lowercase().as_str(),
|
||||
"not" | "is_null" | "is_true" | "is_false" | "step_timestamp" | "cast"
|
||||
)
|
||||
}
|
||||
|
||||
/// Create a UnaryFunc from a string of the function name and given argument type(optional)
|
||||
pub fn from_str_and_type(
|
||||
name: &str,
|
||||
@@ -593,27 +579,6 @@ impl BinaryFunc {
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
pub fn is_valid_func_name(name: &str) -> bool {
|
||||
matches!(
|
||||
name.to_lowercase().as_str(),
|
||||
"eq" | "equal"
|
||||
| "not_eq"
|
||||
| "not_equal"
|
||||
| "lt"
|
||||
| "lte"
|
||||
| "gt"
|
||||
| "gte"
|
||||
| "add"
|
||||
| "sub"
|
||||
| "subtract"
|
||||
| "mul"
|
||||
| "multiply"
|
||||
| "div"
|
||||
| "divide"
|
||||
| "mod"
|
||||
)
|
||||
}
|
||||
|
||||
/// choose the appropriate specialization based on the input types
|
||||
/// return a specialization of the binary function and its actual input and output types (so no null type is present)
|
||||
///
|
||||
@@ -805,10 +770,6 @@ impl VariadicFunc {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_valid_func_name(name: &str) -> bool {
|
||||
matches!(name.to_lowercase().as_str(), "and" | "or")
|
||||
}
|
||||
|
||||
/// Create a VariadicFunc from a string of the function name and given argument types(optional)
|
||||
pub fn from_str_and_types(
|
||||
name: &str,
|
||||
|
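The `is_valid_func_name` helpers added in these hunks let a caller cheaply check whether a name belongs to a function family before invoking the fallible constructor, which is the dispatch pattern the `TypedExpr` match arms adopt further down. The following self-contained sketch only illustrates that guard-then-construct idea; the enum and names are made up for the example, not the crate's types.

```rust
#[derive(Debug)]
enum VariadicOp {
    And,
    Or,
}

impl VariadicOp {
    /// Cheap, infallible name check usable as a match guard.
    fn is_valid_func_name(name: &str) -> bool {
        matches!(name.to_lowercase().as_str(), "and" | "or")
    }

    /// Fallible constructor, only called once the name is known to be valid.
    fn from_name(name: &str) -> Result<Self, String> {
        match name.to_lowercase().as_str() {
            "and" => Ok(Self::And),
            "or" => Ok(Self::Or),
            other => Err(format!("unsupported variadic function: {other}")),
        }
    }
}

fn main() {
    let fn_name = "OR";
    if VariadicOp::is_valid_func_name(fn_name) {
        let func = VariadicOp::from_name(fn_name).expect("validated above");
        println!("dispatched to {func:?}");
    } else {
        println!("fall through to a single unsupported-function error");
    }
}
```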
||||
@@ -23,7 +23,6 @@ use literal::{from_substrait_literal, from_substrait_type};
|
||||
use prost::Message;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use query::plan::LogicalPlan;
|
||||
use query::query_engine::DefaultSerializer;
|
||||
use query::QueryEngine;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
@@ -122,7 +121,7 @@ pub async fn sql_to_flow_plan(
|
||||
.context(ExternalSnafu)?;
|
||||
let LogicalPlan::DfPlan(plan) = plan;
|
||||
let sub_plan = DFLogicalSubstraitConvertor {}
|
||||
.to_sub_plan(&plan, DefaultSerializer)
|
||||
.to_sub_plan(&plan)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
|
||||
@@ -295,9 +294,7 @@ mod test {
|
||||
let LogicalPlan::DfPlan(plan) = plan;
|
||||
|
||||
// encode then decode so as to rely on the conversion impl from logical plan to substrait plan
|
||||
let bytes = DFLogicalSubstraitConvertor {}
|
||||
.encode(&plan, DefaultSerializer)
|
||||
.unwrap();
|
||||
let bytes = DFLogicalSubstraitConvertor {}.encode(&plan).unwrap();
|
||||
|
||||
proto::Plan::decode(bytes).unwrap()
|
||||
}
|
||||
|
||||
@@ -101,7 +101,7 @@ impl TypedExpr {
|
||||
.unzip();
|
||||
|
||||
match arg_len {
|
||||
1 if UnaryFunc::is_valid_func_name(fn_name) => {
|
||||
1 if UnaryFunc::from_str_and_type(fn_name, None).is_ok() => {
|
||||
let func = UnaryFunc::from_str_and_type(fn_name, None)?;
|
||||
let arg = arg_exprs[0].clone();
|
||||
let ret_type = ColumnType::new_nullable(func.signature().output.clone());
|
||||
@@ -123,7 +123,13 @@ impl TypedExpr {
|
||||
|
||||
Ok(TypedExpr::new(arg.call_unary(func), ret_type))
|
||||
}
|
||||
2 if BinaryFunc::is_valid_func_name(fn_name) => {
|
||||
2 if BinaryFunc::from_str_expr_and_type(
|
||||
fn_name,
|
||||
&arg_exprs,
|
||||
arg_types.get(0..2).expect("arg have 2 elements"),
|
||||
)
|
||||
.is_ok() =>
|
||||
{
|
||||
let (func, signature) =
|
||||
BinaryFunc::from_str_expr_and_type(fn_name, &arg_exprs, &arg_types[0..2])?;
|
||||
|
||||
@@ -165,8 +171,7 @@ impl TypedExpr {
|
||||
Ok(TypedExpr::new(ret_expr, ret_type))
|
||||
}
|
||||
_var => {
|
||||
if VariadicFunc::is_valid_func_name(fn_name) {
|
||||
let func = VariadicFunc::from_str_and_types(fn_name, &arg_types)?;
|
||||
if let Ok(func) = VariadicFunc::from_str_and_types(fn_name, &arg_types) {
|
||||
let ret_type = ColumnType::new_nullable(func.signature().output.clone());
|
||||
let mut expr = ScalarExpr::CallVariadic {
|
||||
func,
|
||||
@@ -174,8 +179,9 @@ impl TypedExpr {
|
||||
};
|
||||
expr.optimize();
|
||||
Ok(TypedExpr::new(expr, ret_type))
|
||||
} else if UnmaterializableFunc::is_valid_func_name(fn_name) {
|
||||
let func = UnmaterializableFunc::from_str_args(fn_name, arg_typed_exprs)?;
|
||||
} else if let Ok(func) =
|
||||
UnmaterializableFunc::from_str_args(fn_name, arg_typed_exprs)
|
||||
{
|
||||
let ret_type = ColumnType::new_nullable(func.signature().output.clone());
|
||||
Ok(TypedExpr::new(
|
||||
ScalarExpr::CallUnmaterializable(func),
|
||||
@@ -322,12 +328,8 @@ impl TypedExpr {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::collections::HashMap;
|
||||
|
||||
use common_time::{DateTime, Interval};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::Value;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use super::*;
|
||||
use crate::expr::{GlobalId, MapFilterProject};
|
||||
@@ -512,162 +514,4 @@ mod test {
|
||||
|
||||
assert_eq!(flow_plan.unwrap(), expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_func_sig() {
|
||||
fn lit(v: impl ToString) -> substrait_proto::proto::FunctionArgument {
|
||||
use substrait_proto::proto::expression;
|
||||
let expr = Expression {
|
||||
rex_type: Some(expression::RexType::Literal(expression::Literal {
|
||||
nullable: false,
|
||||
type_variation_reference: 0,
|
||||
literal_type: Some(expression::literal::LiteralType::String(v.to_string())),
|
||||
})),
|
||||
};
|
||||
substrait_proto::proto::FunctionArgument {
|
||||
arg_type: Some(substrait_proto::proto::function_argument::ArgType::Value(
|
||||
expr,
|
||||
)),
|
||||
}
|
||||
}
|
||||
fn col(i: usize) -> substrait_proto::proto::FunctionArgument {
|
||||
use substrait_proto::proto::expression;
|
||||
let expr = Expression {
|
||||
rex_type: Some(expression::RexType::Selection(Box::new(
|
||||
expression::FieldReference {
|
||||
reference_type: Some(
|
||||
expression::field_reference::ReferenceType::DirectReference(
|
||||
expression::ReferenceSegment {
|
||||
reference_type: Some(
|
||||
expression::reference_segment::ReferenceType::StructField(
|
||||
Box::new(expression::reference_segment::StructField {
|
||||
field: i as i32,
|
||||
child: None,
|
||||
}),
|
||||
),
|
||||
),
|
||||
},
|
||||
),
|
||||
),
|
||||
root_type: None,
|
||||
},
|
||||
))),
|
||||
};
|
||||
substrait_proto::proto::FunctionArgument {
|
||||
arg_type: Some(substrait_proto::proto::function_argument::ArgType::Value(
|
||||
expr,
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
let f = substrait_proto::proto::expression::ScalarFunction {
|
||||
function_reference: 0,
|
||||
arguments: vec![col(0)],
|
||||
options: vec![],
|
||||
output_type: None,
|
||||
..Default::default()
|
||||
};
|
||||
let input_schema = RelationType::new(vec![ColumnType::new(CDT::uint32_datatype(), false)]);
|
||||
let extensions = FunctionExtensions {
|
||||
anchor_to_name: HashMap::from([(0, "is_null".to_string())]),
|
||||
};
|
||||
let res = TypedExpr::from_substrait_scalar_func(&f, &input_schema, &extensions).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
TypedExpr {
|
||||
expr: ScalarExpr::Column(0).call_unary(UnaryFunc::IsNull),
|
||||
typ: ColumnType {
|
||||
scalar_type: CDT::boolean_datatype(),
|
||||
nullable: true,
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
let f = substrait_proto::proto::expression::ScalarFunction {
|
||||
function_reference: 0,
|
||||
arguments: vec![col(0), col(1)],
|
||||
options: vec![],
|
||||
output_type: None,
|
||||
..Default::default()
|
||||
};
|
||||
let input_schema = RelationType::new(vec![
|
||||
ColumnType::new(CDT::uint32_datatype(), false),
|
||||
ColumnType::new(CDT::uint32_datatype(), false),
|
||||
]);
|
||||
let extensions = FunctionExtensions {
|
||||
anchor_to_name: HashMap::from([(0, "add".to_string())]),
|
||||
};
|
||||
let res = TypedExpr::from_substrait_scalar_func(&f, &input_schema, &extensions).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
TypedExpr {
|
||||
expr: ScalarExpr::Column(0)
|
||||
.call_binary(ScalarExpr::Column(1), BinaryFunc::AddUInt32,),
|
||||
typ: ColumnType {
|
||||
scalar_type: CDT::uint32_datatype(),
|
||||
nullable: true,
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
let f = substrait_proto::proto::expression::ScalarFunction {
|
||||
function_reference: 0,
|
||||
arguments: vec![col(0), lit("1 second"), lit("2021-07-01 00:00:00")],
|
||||
options: vec![],
|
||||
output_type: None,
|
||||
..Default::default()
|
||||
};
|
||||
let input_schema = RelationType::new(vec![
|
||||
ColumnType::new(CDT::timestamp_nanosecond_datatype(), false),
|
||||
ColumnType::new(CDT::string_datatype(), false),
|
||||
]);
|
||||
let extensions = FunctionExtensions {
|
||||
anchor_to_name: HashMap::from([(0, "tumble".to_string())]),
|
||||
};
|
||||
let res = TypedExpr::from_substrait_scalar_func(&f, &input_schema, &extensions).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
ScalarExpr::CallUnmaterializable(UnmaterializableFunc::TumbleWindow {
|
||||
ts: Box::new(
|
||||
ScalarExpr::Column(0)
|
||||
.with_type(ColumnType::new(CDT::timestamp_nanosecond_datatype(), false))
|
||||
),
|
||||
window_size: Interval::from_month_day_nano(0, 0, 1_000_000_000),
|
||||
start_time: Some(DateTime::new(1625097600000))
|
||||
})
|
||||
.with_type(ColumnType::new(CDT::timestamp_millisecond_datatype(), true)),
|
||||
);
|
||||
|
||||
let f = substrait_proto::proto::expression::ScalarFunction {
|
||||
function_reference: 0,
|
||||
arguments: vec![col(0), lit("1 second")],
|
||||
options: vec![],
|
||||
output_type: None,
|
||||
..Default::default()
|
||||
};
|
||||
let input_schema = RelationType::new(vec![
|
||||
ColumnType::new(CDT::timestamp_nanosecond_datatype(), false),
|
||||
ColumnType::new(CDT::string_datatype(), false),
|
||||
]);
|
||||
let extensions = FunctionExtensions {
|
||||
anchor_to_name: HashMap::from([(0, "tumble".to_string())]),
|
||||
};
|
||||
let res = TypedExpr::from_substrait_scalar_func(&f, &input_schema, &extensions).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
ScalarExpr::CallUnmaterializable(UnmaterializableFunc::TumbleWindow {
|
||||
ts: Box::new(
|
||||
ScalarExpr::Column(0)
|
||||
.with_type(ColumnType::new(CDT::timestamp_nanosecond_datatype(), false))
|
||||
),
|
||||
window_size: Interval::from_month_day_nano(0, 0, 1_000_000_000),
|
||||
start_time: None
|
||||
})
|
||||
.with_type(ColumnType::new(CDT::timestamp_millisecond_datatype(), true)),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,5 +68,5 @@ datanode.workspace = true
|
||||
futures = "0.3"
|
||||
meta-srv = { workspace = true, features = ["mock"] }
|
||||
strfmt = "0.2"
|
||||
tower.workspace = true
|
||||
tower = "0.4"
|
||||
uuid.workspace = true
|
||||
|
||||
@@ -554,7 +554,6 @@ pub fn check_permission(
|
||||
Statement::ShowIndex(stmt) => {
|
||||
validate_db_permission!(stmt, query_ctx);
|
||||
}
|
||||
Statement::ShowStatus(_stmt) => {}
|
||||
Statement::DescribeTable(stmt) => {
|
||||
validate_param(stmt.name(), query_ctx)?;
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ use api::v1::query_request::Query;
|
||||
use api::v1::{DeleteRequests, DropFlowExpr, InsertRequests, RowDeleteRequests, RowInsertRequests};
|
||||
use async_trait::async_trait;
|
||||
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
|
||||
use common_meta::table_name::TableName;
|
||||
use common_query::Output;
|
||||
use common_telemetry::tracing;
|
||||
use query::parser::PromQuery;
|
||||
@@ -26,7 +27,6 @@ use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::error::{
|
||||
Error, IncompleteGrpcRequestSnafu, NotSupportedSnafu, PermissionSnafu, Result,
|
||||
|
||||
@@ -72,12 +72,12 @@ mod python {
|
||||
use arc_swap::ArcSwap;
|
||||
use catalog::RegisterSystemTableRequest;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::table_name::TableName;
|
||||
use common_telemetry::{error, info};
|
||||
use script::manager::ScriptManager;
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::table_name::TableName;
|
||||
|
||||
use super::*;
|
||||
use crate::error::{CatalogSnafu, TableNotFoundSnafu};
|
||||
|
||||
@@ -256,7 +256,7 @@ impl ExternalSorter {
|
||||
IntermediateWriter::new(writer).write_all(values, bitmap_leading_zeros as _).await.inspect(|_|
|
||||
debug!("Dumped {entries} entries ({memory_usage} bytes) to intermediate file {file_id} for index {index_name}")
|
||||
).inspect_err(|e|
|
||||
error!(e; "Failed to dump {entries} entries to intermediate file {file_id} for index {index_name}")
|
||||
error!("Failed to dump {entries} entries to intermediate file {file_id} for index {index_name}. Error: {e}")
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -21,18 +21,12 @@ use serde_json::error::Error as JsonError;
|
||||
use snafu::{Location, Snafu};
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use crate::kafka::NamespaceImpl as KafkaNamespace;
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Invalid provider type, expected: {}, actual: {}", expected, actual))]
|
||||
InvalidProvider {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
expected: String,
|
||||
actual: String,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start log store gc task"))]
|
||||
StartGcTask {
|
||||
#[snafu(implicit)]
|
||||
@@ -176,28 +170,34 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to produce records to Kafka, topic: {}, size: {}", topic, size))]
|
||||
#[snafu(display(
|
||||
"Failed to produce records to Kafka, topic: {}, size: {}, limit: {}",
|
||||
topic,
|
||||
size,
|
||||
limit,
|
||||
))]
|
||||
ProduceRecord {
|
||||
topic: String,
|
||||
size: usize,
|
||||
limit: usize,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: rskafka::client::producer::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read a record from Kafka, topic: {}", topic))]
|
||||
#[snafu(display("Failed to read a record from Kafka, ns: {}", ns))]
|
||||
ConsumeRecord {
|
||||
topic: String,
|
||||
ns: KafkaNamespace,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: rskafka::client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to get the latest offset, topic: {}", topic))]
|
||||
#[snafu(display("Failed to get the latest offset, ns: {}", ns))]
|
||||
GetOffset {
|
||||
topic: String,
|
||||
ns: KafkaNamespace,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
|
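As a usage sketch only (the call site is not part of this diff): snafu derives a `ProduceRecordSnafu` context selector from the variant above, so the new `limit` field is supplied wherever the producer error is converted. The error enum and source type below are simplified stand-ins, not the crate's definitions.

```rust
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display(
        "Failed to produce records to Kafka, topic: {}, size: {}, limit: {}",
        topic,
        size,
        limit
    ))]
    ProduceRecord {
        topic: String,
        size: usize,
        limit: usize,
        #[snafu(source)]
        error: std::io::Error, // stand-in for rskafka's producer error type
    },
}

fn produce(topic: &str, batch: &[u8], limit: usize) -> Result<(), Error> {
    // `context` fills the selector fields and captures the source error.
    fake_send(batch).context(ProduceRecordSnafu {
        topic,
        size: batch.len(),
        limit,
    })
}

fn fake_send(_batch: &[u8]) -> Result<(), std::io::Error> {
    Err(std::io::Error::new(
        std::io::ErrorKind::Other,
        "broker unavailable",
    ))
}
```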
||||
@@ -12,12 +12,17 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::mem::size_of;
|
||||
pub(crate) mod client_manager;
|
||||
pub mod log_store;
|
||||
pub(crate) mod util;
|
||||
|
||||
use std::fmt::Display;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use store_api::logstore::entry::Id as EntryId;
|
||||
use store_api::logstore::entry::{Entry, Id as EntryId, RawEntry};
|
||||
use store_api::logstore::namespace::Namespace;
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
/// Kafka Namespace implementation.
|
||||
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
|
||||
@@ -26,6 +31,18 @@ pub struct NamespaceImpl {
|
||||
pub topic: String,
|
||||
}
|
||||
|
||||
impl Namespace for NamespaceImpl {
|
||||
fn id(&self) -> u64 {
|
||||
self.region_id
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for NamespaceImpl {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "[topic: {}, region: {}]", self.topic, self.region_id)
|
||||
}
|
||||
}
|
||||
|
||||
/// Kafka Entry implementation.
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub struct EntryImpl {
|
||||
@@ -36,3 +53,65 @@ pub struct EntryImpl {
|
||||
/// The namespace used to identify and isolate log entries from different regions.
|
||||
pub ns: NamespaceImpl,
|
||||
}
|
||||
|
||||
impl Entry for EntryImpl {
|
||||
fn into_raw_entry(self) -> RawEntry {
|
||||
RawEntry {
|
||||
region_id: self.region_id(),
|
||||
entry_id: self.id(),
|
||||
data: self.data,
|
||||
}
|
||||
}
|
||||
|
||||
fn data(&self) -> &[u8] {
|
||||
&self.data
|
||||
}
|
||||
|
||||
fn id(&self) -> EntryId {
|
||||
self.id
|
||||
}
|
||||
|
||||
fn region_id(&self) -> RegionId {
|
||||
RegionId::from_u64(self.ns.region_id)
|
||||
}
|
||||
|
||||
fn estimated_size(&self) -> usize {
|
||||
size_of::<Self>() + self.data.capacity() * size_of::<u8>() + self.ns.topic.capacity()
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for EntryImpl {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"Entry [ns: {}, id: {}, data_len: {}]",
|
||||
self.ns,
|
||||
self.id,
|
||||
self.data.len()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::mem::size_of;
|
||||
|
||||
use store_api::logstore::entry::Entry;
|
||||
|
||||
use crate::kafka::{EntryImpl, NamespaceImpl};
|
||||
|
||||
#[test]
|
||||
fn test_estimated_size() {
|
||||
let entry = EntryImpl {
|
||||
data: Vec::with_capacity(100),
|
||||
id: 0,
|
||||
ns: NamespaceImpl {
|
||||
region_id: 0,
|
||||
topic: String::with_capacity(10),
|
||||
},
|
||||
};
|
||||
let expected = size_of::<EntryImpl>() + 100 * size_of::<u8>() + 10;
|
||||
let got = entry.estimated_size();
|
||||
assert_eq!(expected, got);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,7 +27,6 @@ use tokio::sync::RwLock;
|
||||
use crate::error::{
|
||||
BuildClientSnafu, BuildPartitionClientSnafu, ResolveKafkaEndpointSnafu, Result,
|
||||
};
|
||||
use crate::kafka::util::record::MIN_BATCH_SIZE;
|
||||
|
||||
// Each topic only has one partition for now.
|
||||
// The `DEFAULT_PARTITION` refers to the index of the partition.
|
||||
@@ -49,8 +48,7 @@ pub(crate) struct Client {
|
||||
impl Client {
|
||||
/// Creates a Client from the raw client.
|
||||
pub(crate) fn new(raw_client: Arc<PartitionClient>, config: &DatanodeKafkaConfig) -> Self {
|
||||
let record_aggregator =
|
||||
RecordAggregator::new((config.max_batch_size.as_bytes() as usize).max(MIN_BATCH_SIZE));
|
||||
let record_aggregator = RecordAggregator::new(config.max_batch_size.as_bytes() as usize);
|
||||
let batch_producer = BatchProducerBuilder::new(raw_client.clone())
|
||||
.with_compression(config.compression)
|
||||
.with_linger(config.linger)
|
||||
|
||||
@@ -17,23 +17,21 @@ use std::sync::Arc;
|
||||
|
||||
use common_telemetry::{debug, warn};
|
||||
use common_wal::config::kafka::DatanodeKafkaConfig;
|
||||
use common_wal::options::WalOptions;
|
||||
use futures_util::StreamExt;
|
||||
use rskafka::client::consumer::{StartOffset, StreamConsumerBuilder};
|
||||
use rskafka::client::partition::OffsetAt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::logstore::entry::{
|
||||
Entry, Id as EntryId, MultiplePartEntry, MultiplePartHeader, NaiveEntry,
|
||||
};
|
||||
use store_api::logstore::provider::{KafkaProvider, Provider};
|
||||
use store_api::logstore::{AppendBatchResponse, LogStore, SendableEntryStream};
|
||||
use store_api::storage::RegionId;
|
||||
use snafu::ResultExt;
|
||||
use store_api::logstore::entry::{Entry as EntryTrait, Id as EntryId};
|
||||
use store_api::logstore::entry_stream::SendableEntryStream;
|
||||
use store_api::logstore::namespace::Id as NamespaceId;
|
||||
use store_api::logstore::{AppendBatchResponse, AppendResponse, LogStore};
|
||||
|
||||
use crate::error::{self, ConsumeRecordSnafu, Error, GetOffsetSnafu, InvalidProviderSnafu, Result};
|
||||
use crate::error::{ConsumeRecordSnafu, Error, GetOffsetSnafu, IllegalSequenceSnafu, Result};
|
||||
use crate::kafka::client_manager::{ClientManager, ClientManagerRef};
|
||||
use crate::kafka::util::offset::Offset;
|
||||
use crate::kafka::util::record::{
|
||||
maybe_emit_entry, remaining_entries, Record, RecordProducer, ESTIMATED_META_SIZE,
|
||||
};
|
||||
use crate::kafka::util::record::{maybe_emit_entry, Record, RecordProducer};
|
||||
use crate::kafka::{EntryImpl, NamespaceImpl};
|
||||
use crate::metrics;
|
||||
|
||||
/// A log store backed by Kafka.
|
||||
@@ -54,81 +52,41 @@ impl KafkaLogStore {
|
||||
}
|
||||
}
|
||||
|
||||
fn build_entry(
|
||||
data: &mut Vec<u8>,
|
||||
entry_id: EntryId,
|
||||
region_id: RegionId,
|
||||
provider: &Provider,
|
||||
max_data_size: usize,
|
||||
) -> Entry {
|
||||
if data.len() <= max_data_size {
|
||||
Entry::Naive(NaiveEntry {
|
||||
provider: provider.clone(),
|
||||
region_id,
|
||||
entry_id,
|
||||
data: std::mem::take(data),
|
||||
})
|
||||
} else {
|
||||
let parts = std::mem::take(data)
|
||||
.chunks(max_data_size)
|
||||
.map(|s| s.into())
|
||||
.collect::<Vec<_>>();
|
||||
let num_parts = parts.len();
|
||||
|
||||
let mut headers = Vec::with_capacity(num_parts);
|
||||
headers.push(MultiplePartHeader::First);
|
||||
headers.extend((1..num_parts - 1).map(MultiplePartHeader::Middle));
|
||||
headers.push(MultiplePartHeader::Last);
|
||||
|
||||
Entry::MultiplePart(MultiplePartEntry {
|
||||
provider: provider.clone(),
|
||||
region_id,
|
||||
entry_id,
|
||||
headers,
|
||||
parts,
|
||||
})
|
||||
}
|
||||
}
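A quick standalone check of the splitting rule in `build_entry` above (illustrative only): `chunks(max_data_size)` yields ceil(len / max) parts, which is why a 100-byte payload splits into 2 parts at a 50-byte limit and into 5 parts at a 21-byte limit, matching the `First`/`Middle`/`Last` headers exercised by the tests later in this diff.

```rust
/// Number of parts `data.chunks(max)` produces, i.e. ceil(len / max).
fn part_count(len: usize, max: usize) -> usize {
    assert!(max > 0);
    (len + max - 1) / max
}

fn main() {
    assert_eq!(part_count(100, 120), 1); // fits in one record: a Naive entry
    assert_eq!(part_count(100, 50), 2); // First + Last
    assert_eq!(part_count(100, 21), 5); // First + Middle(1..=3) + Last
}
```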
#[async_trait::async_trait]
|
||||
impl LogStore for KafkaLogStore {
|
||||
type Error = Error;
|
||||
type Entry = EntryImpl;
|
||||
type Namespace = NamespaceImpl;
|
||||
|
||||
/// Creates an [Entry].
|
||||
fn entry(
|
||||
&self,
|
||||
data: &mut Vec<u8>,
|
||||
entry_id: EntryId,
|
||||
region_id: RegionId,
|
||||
provider: &Provider,
|
||||
) -> Result<Entry> {
|
||||
provider
|
||||
.as_kafka_provider()
|
||||
.with_context(|| InvalidProviderSnafu {
|
||||
expected: KafkaProvider::type_name(),
|
||||
actual: provider.type_name(),
|
||||
})?;
|
||||
|
||||
let max_data_size =
|
||||
self.client_manager.config.max_batch_size.as_bytes() as usize - ESTIMATED_META_SIZE;
|
||||
Ok(build_entry(
|
||||
data,
|
||||
entry_id,
|
||||
region_id,
|
||||
provider,
|
||||
max_data_size,
|
||||
))
|
||||
/// Creates an entry of the associated Entry type.
|
||||
fn entry(&self, data: &mut Vec<u8>, entry_id: EntryId, ns: Self::Namespace) -> Self::Entry {
|
||||
EntryImpl {
|
||||
data: std::mem::take(data),
|
||||
id: entry_id,
|
||||
ns,
|
||||
}
|
||||
}
|
||||
|
||||
/// Appends an entry to the log store and returns a response containing the entry id of the appended entry.
|
||||
async fn append(&self, entry: Self::Entry) -> Result<AppendResponse> {
|
||||
let entry_id = RecordProducer::new(entry.ns.clone())
|
||||
.with_entries(vec![entry])
|
||||
.produce(&self.client_manager)
|
||||
.await
|
||||
.map(TryInto::try_into)??;
|
||||
Ok(AppendResponse {
|
||||
last_entry_id: entry_id,
|
||||
})
|
||||
}
|
||||
|
||||
// TODO(weny): refactor the writing.
|
||||
/// Appends a batch of entries and returns a response containing a map where the key is a region id
|
||||
/// while the value is the id of the last successfully written entry of the region.
|
||||
async fn append_batch(&self, entries: Vec<Entry>) -> Result<AppendBatchResponse> {
|
||||
async fn append_batch(&self, entries: Vec<Self::Entry>) -> Result<AppendBatchResponse> {
|
||||
metrics::METRIC_KAFKA_APPEND_BATCH_CALLS_TOTAL.inc();
|
||||
metrics::METRIC_KAFKA_APPEND_BATCH_BYTES_TOTAL.inc_by(
|
||||
entries
|
||||
.iter()
|
||||
.map(|entry| entry.estimated_size())
|
||||
.map(EntryTrait::estimated_size)
|
||||
.sum::<usize>() as u64,
|
||||
);
|
||||
let _timer = metrics::METRIC_KAFKA_APPEND_BATCH_ELAPSED.start_timer();
|
||||
@@ -140,17 +98,9 @@ impl LogStore for KafkaLogStore {
|
||||
// Groups entries by region id and pushes them to an associated record producer.
|
||||
let mut producers = HashMap::with_capacity(entries.len());
|
||||
for entry in entries {
|
||||
let provider = entry
|
||||
.provider()
|
||||
.as_kafka_provider()
|
||||
.context(error::InvalidProviderSnafu {
|
||||
expected: KafkaProvider::type_name(),
|
||||
actual: entry.provider().type_name(),
|
||||
})?
|
||||
.clone();
|
||||
producers
|
||||
.entry(entry.region_id())
|
||||
.or_insert_with(|| RecordProducer::new(provider))
|
||||
.entry(entry.ns.region_id)
|
||||
.or_insert_with(|| RecordProducer::new(entry.ns.clone()))
|
||||
.push(entry);
|
||||
}
|
||||
|
||||
@@ -172,27 +122,20 @@ impl LogStore for KafkaLogStore {
|
||||
Ok(AppendBatchResponse { last_entry_ids })
|
||||
}
|
||||
|
||||
/// Creates a new `EntryStream` to asynchronously generate `Entry` with entry ids.
|
||||
/// Returns entries belonging to `provider`, starting from `entry_id`.
|
||||
/// Creates a new `EntryStream` to asynchronously generate `Entry` with entry ids
|
||||
/// starting from `entry_id`. The generated entries will be filtered by the namespace.
|
||||
async fn read(
|
||||
&self,
|
||||
provider: &Provider,
|
||||
ns: &Self::Namespace,
|
||||
entry_id: EntryId,
|
||||
) -> Result<SendableEntryStream<'static, Entry, Self::Error>> {
|
||||
let provider = provider
|
||||
.as_kafka_provider()
|
||||
.with_context(|| InvalidProviderSnafu {
|
||||
expected: KafkaProvider::type_name(),
|
||||
actual: provider.type_name(),
|
||||
})?;
|
||||
|
||||
) -> Result<SendableEntryStream<Self::Entry, Self::Error>> {
|
||||
metrics::METRIC_KAFKA_READ_CALLS_TOTAL.inc();
|
||||
let _timer = metrics::METRIC_KAFKA_READ_ELAPSED.start_timer();
|
||||
|
||||
// Gets the client associated with the topic.
|
||||
let client = self
|
||||
.client_manager
|
||||
.get_or_insert(&provider.topic)
|
||||
.get_or_insert(&ns.topic)
|
||||
.await?
|
||||
.raw_client
|
||||
.clone();
|
||||
@@ -204,16 +147,14 @@ impl LogStore for KafkaLogStore {
|
||||
let end_offset = client
|
||||
.get_offset(OffsetAt::Latest)
|
||||
.await
|
||||
.context(GetOffsetSnafu {
|
||||
topic: &provider.topic,
|
||||
})?
|
||||
.context(GetOffsetSnafu { ns: ns.clone() })?
|
||||
- 1;
|
||||
// Reads entries with offsets in the range [start_offset, end_offset].
|
||||
let start_offset = Offset::try_from(entry_id)?.0;
|
||||
|
||||
debug!(
|
||||
"Start reading entries in range [{}, {}] for ns {}",
|
||||
start_offset, end_offset, provider
|
||||
start_offset, end_offset, ns
|
||||
);
|
||||
|
||||
// Abort if there are no new entries.
|
||||
@@ -221,7 +162,7 @@ impl LogStore for KafkaLogStore {
|
||||
if start_offset > end_offset {
|
||||
warn!(
|
||||
"No new entries for ns {} in range [{}, {}]",
|
||||
provider, start_offset, end_offset
|
||||
ns, start_offset, end_offset
|
||||
);
|
||||
return Ok(futures_util::stream::empty().boxed());
|
||||
}
|
||||
@@ -233,20 +174,20 @@ impl LogStore for KafkaLogStore {
|
||||
|
||||
debug!(
|
||||
"Built a stream consumer for ns {} to consume entries in range [{}, {}]",
|
||||
provider, start_offset, end_offset
|
||||
ns, start_offset, end_offset
|
||||
);
|
||||
|
||||
// A buffer is used to collect records to construct a complete entry.
|
||||
let mut entry_records: HashMap<RegionId, Vec<Record>> = HashMap::new();
|
||||
let provider = provider.clone();
|
||||
// Key: entry id, Value: the records associated with the entry.
|
||||
let mut entry_records: HashMap<_, Vec<_>> = HashMap::new();
|
||||
let ns_clone = ns.clone();
|
||||
let stream = async_stream::stream!({
|
||||
while let Some(consume_result) = stream_consumer.next().await {
|
||||
// Each next on the stream consumer produces a `RecordAndOffset` and a high watermark offset.
|
||||
// The `RecordAndOffset` contains the record data and its start offset.
|
||||
// The high watermark offset is the offset of the last record plus one.
|
||||
let (record_and_offset, high_watermark) =
|
||||
consume_result.context(ConsumeRecordSnafu {
|
||||
topic: &provider.topic,
|
||||
consume_result.with_context(|_| ConsumeRecordSnafu {
|
||||
ns: ns_clone.clone(),
|
||||
})?;
|
||||
let (kafka_record, offset) = (record_and_offset.record, record_and_offset.offset);
|
||||
|
||||
@@ -254,35 +195,37 @@ impl LogStore for KafkaLogStore {
|
||||
.inc_by(kafka_record.approximate_size() as u64);
|
||||
|
||||
debug!(
|
||||
"Read a record at offset {} for topic {}, high watermark: {}",
|
||||
offset, provider.topic, high_watermark
|
||||
"Read a record at offset {} for ns {}, high watermark: {}",
|
||||
offset, ns_clone, high_watermark
|
||||
);
|
||||
|
||||
// Ignores no-op records.
|
||||
if kafka_record.value.is_none() {
|
||||
if check_termination(offset, end_offset) {
|
||||
if let Some(entries) = remaining_entries(&provider, &mut entry_records) {
|
||||
yield Ok(entries);
|
||||
}
|
||||
if check_termination(offset, end_offset, &entry_records)? {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Filters records by namespace.
|
||||
let record = Record::try_from(kafka_record)?;
|
||||
if record.meta.ns != ns_clone {
|
||||
if check_termination(offset, end_offset, &entry_records)? {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Tries to construct an entry from records consumed so far.
|
||||
if let Some(mut entry) = maybe_emit_entry(&provider, record, &mut entry_records)? {
|
||||
if let Some(mut entry) = maybe_emit_entry(record, &mut entry_records)? {
|
||||
// We don't rely on the EntryId generated by mito2.
|
||||
// Instead, we use the offset return from Kafka as EntryId.
|
||||
// Therefore, we MUST overwrite the EntryId with RecordOffset.
|
||||
entry.set_entry_id(offset as u64);
|
||||
entry.id = offset as u64;
|
||||
yield Ok(vec![entry]);
|
||||
}
|
||||
|
||||
if check_termination(offset, end_offset) {
|
||||
if let Some(entries) = remaining_entries(&provider, &mut entry_records) {
|
||||
yield Ok(entries);
|
||||
}
|
||||
if check_termination(offset, end_offset, &entry_records)? {
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -290,25 +233,39 @@ impl LogStore for KafkaLogStore {
|
||||
Ok(Box::pin(stream))
|
||||
}
|
||||
|
||||
/// Creates a namespace of the associated Namespace type.
|
||||
fn namespace(&self, ns_id: NamespaceId, wal_options: &WalOptions) -> Self::Namespace {
|
||||
// Safety: upon start, the datanode checks the consistency of the wal providers in the wal config of the
|
||||
// datanode and that of the metasrv. Therefore, the wal options passed into the kafka log store
|
||||
// must be of type WalOptions::Kafka.
|
||||
let WalOptions::Kafka(kafka_options) = wal_options else {
|
||||
unreachable!()
|
||||
};
|
||||
NamespaceImpl {
|
||||
region_id: ns_id,
|
||||
topic: kafka_options.topic.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new `Namespace` from the given ref.
|
||||
async fn create_namespace(&self, _provider: &Provider) -> Result<()> {
|
||||
async fn create_namespace(&self, _ns: &Self::Namespace) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Deletes an existing `Namespace` specified by the given ref.
|
||||
async fn delete_namespace(&self, _provider: &Provider) -> Result<()> {
|
||||
async fn delete_namespace(&self, _ns: &Self::Namespace) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Lists all existing namespaces.
|
||||
async fn list_namespaces(&self) -> Result<Vec<Provider>> {
|
||||
async fn list_namespaces(&self) -> Result<Vec<Self::Namespace>> {
|
||||
Ok(vec![])
|
||||
}
|
||||
|
||||
/// Marks all entries with ids `<=entry_id` of the given `namespace` as obsolete,
|
||||
/// so that the log store can safely delete those entries. This method does not guarantee
|
||||
/// that the obsolete entries are deleted immediately.
|
||||
async fn obsolete(&self, _provider: &Provider, _entry_id: EntryId) -> Result<()> {
|
||||
async fn obsolete(&self, _ns: Self::Namespace, _entry_id: EntryId) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -318,249 +275,227 @@ impl LogStore for KafkaLogStore {
|
||||
}
|
||||
}
|
||||
|
||||
fn check_termination(offset: i64, end_offset: i64) -> bool {
|
||||
fn check_termination(
|
||||
offset: i64,
|
||||
end_offset: i64,
|
||||
entry_records: &HashMap<EntryId, Vec<Record>>,
|
||||
) -> Result<bool> {
|
||||
// Terminates the stream if the entry with the end offset was read.
|
||||
if offset >= end_offset {
|
||||
debug!("Stream consumer terminates at offset {}", offset);
|
||||
// There must be no records left when the stream terminates.
|
||||
true
|
||||
if !entry_records.is_empty() {
|
||||
return IllegalSequenceSnafu {
|
||||
error: "Found records leftover",
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
Ok(true)
|
||||
} else {
|
||||
false
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::tracing::warn;
|
||||
use common_wal::config::kafka::DatanodeKafkaConfig;
|
||||
use futures::TryStreamExt;
|
||||
use rand::prelude::SliceRandom;
|
||||
use rand::Rng;
|
||||
use store_api::logstore::entry::{Entry, MultiplePartEntry, MultiplePartHeader, NaiveEntry};
|
||||
use store_api::logstore::provider::Provider;
|
||||
use store_api::logstore::LogStore;
|
||||
use store_api::storage::RegionId;
|
||||
use rand::seq::IteratorRandom;
|
||||
|
||||
use super::build_entry;
|
||||
use crate::kafka::log_store::KafkaLogStore;
|
||||
use super::*;
|
||||
use crate::test_util::kafka::{
|
||||
create_topics, entries_with_random_data, new_namespace, EntryBuilder,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_build_naive_entry() {
|
||||
let provider = Provider::kafka_provider("my_topic".to_string());
|
||||
let region_id = RegionId::new(1, 1);
|
||||
let entry = build_entry(&mut vec![1; 100], 1, region_id, &provider, 120);
|
||||
// Stores test context for a region.
|
||||
struct RegionContext {
|
||||
ns: NamespaceImpl,
|
||||
entry_builder: EntryBuilder,
|
||||
expected: Vec<EntryImpl>,
|
||||
flushed_entry_id: EntryId,
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
entry.into_naive_entry().unwrap(),
|
||||
NaiveEntry {
|
||||
provider,
|
||||
region_id,
|
||||
entry_id: 1,
|
||||
data: vec![1; 100]
|
||||
}
|
||||
/// Prepares for a test by constructing a log store and creating a collection of topics.
|
||||
async fn prepare(
|
||||
test_name: &str,
|
||||
num_topics: usize,
|
||||
broker_endpoints: Vec<String>,
|
||||
) -> (KafkaLogStore, Vec<String>) {
|
||||
let topics = create_topics(
|
||||
num_topics,
|
||||
|i| format!("{test_name}_{}_{}", i, uuid::Uuid::new_v4()),
|
||||
&broker_endpoints,
|
||||
)
|
||||
}
|
||||
.await;
|
||||
|
||||
#[test]
|
||||
fn test_build_into_multiple_part_entry() {
|
||||
let provider = Provider::kafka_provider("my_topic".to_string());
|
||||
let region_id = RegionId::new(1, 1);
|
||||
let entry = build_entry(&mut vec![1; 100], 1, region_id, &provider, 50);
|
||||
|
||||
assert_eq!(
|
||||
entry.into_multiple_part_entry().unwrap(),
|
||||
MultiplePartEntry {
|
||||
provider: provider.clone(),
|
||||
region_id,
|
||||
entry_id: 1,
|
||||
headers: vec![MultiplePartHeader::First, MultiplePartHeader::Last],
|
||||
parts: vec![vec![1; 50], vec![1; 50]],
|
||||
}
|
||||
);
|
||||
|
||||
let region_id = RegionId::new(1, 1);
|
||||
let entry = build_entry(&mut vec![1; 100], 1, region_id, &provider, 21);
|
||||
|
||||
assert_eq!(
|
||||
entry.into_multiple_part_entry().unwrap(),
|
||||
MultiplePartEntry {
|
||||
provider,
|
||||
region_id,
|
||||
entry_id: 1,
|
||||
headers: vec![
|
||||
MultiplePartHeader::First,
|
||||
MultiplePartHeader::Middle(1),
|
||||
MultiplePartHeader::Middle(2),
|
||||
MultiplePartHeader::Middle(3),
|
||||
MultiplePartHeader::Last
|
||||
],
|
||||
parts: vec![
|
||||
vec![1; 21],
|
||||
vec![1; 21],
|
||||
vec![1; 21],
|
||||
vec![1; 21],
|
||||
vec![1; 16]
|
||||
],
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
fn generate_entries(
|
||||
logstore: &KafkaLogStore,
|
||||
provider: &Provider,
|
||||
num_entries: usize,
|
||||
region_id: RegionId,
|
||||
data_len: usize,
|
||||
) -> Vec<Entry> {
|
||||
(0..num_entries)
|
||||
.map(|_| {
|
||||
let mut data: Vec<u8> = (0..data_len).map(|_| rand::random::<u8>()).collect();
|
||||
// Always set `entry_id` to 0, the real entry_id will be set during the read.
|
||||
logstore.entry(&mut data, 0, region_id, provider).unwrap()
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_append_batch_basic() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let Ok(broker_endpoints) = std::env::var("GT_KAFKA_ENDPOINTS") else {
|
||||
warn!("The endpoints is empty, skipping the test 'test_append_batch_basic'");
|
||||
return;
|
||||
};
|
||||
let broker_endpoints = broker_endpoints
|
||||
.split(',')
|
||||
.map(|s| s.trim().to_string())
|
||||
.collect::<Vec<_>>();
|
||||
let config = DatanodeKafkaConfig {
|
||||
broker_endpoints,
|
||||
max_batch_size: ReadableSize::kb(32),
|
||||
..Default::default()
|
||||
};
|
||||
let logstore = KafkaLogStore::try_new(&config).await.unwrap();
|
||||
let topic_name = uuid::Uuid::new_v4().to_string();
|
||||
let provider = Provider::kafka_provider(topic_name);
|
||||
let region_entries = (0..5)
|
||||
.map(|i| {
|
||||
let region_id = RegionId::new(1, i);
|
||||
(
|
||||
region_id,
|
||||
generate_entries(&logstore, &provider, 20, region_id, 1024),
|
||||
)
|
||||
})
|
||||
.collect::<HashMap<RegionId, Vec<_>>>();
|
||||
|
||||
let mut all_entries = region_entries
|
||||
.values()
|
||||
.flatten()
|
||||
.cloned()
|
||||
.collect::<Vec<_>>();
|
||||
all_entries.shuffle(&mut rand::thread_rng());
|
||||
|
||||
let response = logstore.append_batch(all_entries.clone()).await.unwrap();
|
||||
// 5 regions
|
||||
assert_eq!(response.last_entry_ids.len(), 5);
|
||||
let got_entries = logstore
|
||||
.read(&provider, 0)
|
||||
.await
|
||||
.unwrap()
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect::<Vec<_>>();
|
||||
for (region_id, _) in region_entries {
|
||||
let expected_entries = all_entries
|
||||
.iter()
|
||||
.filter(|entry| entry.region_id() == region_id)
|
||||
.cloned()
|
||||
.collect::<Vec<_>>();
|
||||
let mut actual_entries = got_entries
|
||||
.iter()
|
||||
.filter(|entry| entry.region_id() == region_id)
|
||||
.cloned()
|
||||
.collect::<Vec<_>>();
|
||||
actual_entries
|
||||
.iter_mut()
|
||||
.for_each(|entry| entry.set_entry_id(0));
|
||||
assert_eq!(expected_entries, actual_entries);
|
||||
// Appends a no-op record to each topic.
|
||||
for topic in topics.iter() {
|
||||
let last_entry_id = logstore
|
||||
.append(EntryImpl {
|
||||
data: vec![],
|
||||
id: 0,
|
||||
ns: new_namespace(topic, 0),
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
.last_entry_id;
|
||||
assert_eq!(last_entry_id, 0);
|
||||
}
|
||||
|
||||
(logstore, topics)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_append_batch_basic_large() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
/// Creates a vector containing the indexes of all regions if `all` is true.
|
||||
/// Otherwise, creates a subset of the indexes. The cardinality of the subset
|
||||
/// is nearly a quarter of that of the universe set.
|
||||
fn all_or_subset(all: bool, num_regions: usize) -> Vec<u64> {
|
||||
assert!(num_regions > 0);
|
||||
let amount = if all {
|
||||
num_regions
|
||||
} else {
|
||||
(num_regions / 4).max(1)
|
||||
};
|
||||
(0..num_regions as u64).choose_multiple(&mut rand::thread_rng(), amount)
|
||||
}
|
||||
|
||||
/// Builds entries for regions specified by `which`. Builds large entries if `large` is true.
|
||||
/// Returns the aggregated entries.
|
||||
fn build_entries(
|
||||
region_contexts: &mut HashMap<u64, RegionContext>,
|
||||
which: &[u64],
|
||||
large: bool,
|
||||
) -> Vec<EntryImpl> {
|
||||
let mut aggregated = Vec::with_capacity(which.len());
|
||||
for region_id in which {
|
||||
let ctx = region_contexts.get_mut(region_id).unwrap();
|
||||
// Builds entries for the region.
|
||||
ctx.expected = if !large {
|
||||
entries_with_random_data(3, &ctx.entry_builder)
|
||||
} else {
|
||||
// Builds a large 256KB entry, far larger than the configured 32KB `max_batch_size`.
|
||||
let large_entry = ctx.entry_builder.with_data([b'1'; 256 * 1024]);
|
||||
vec![large_entry]
|
||||
};
|
||||
// Aggregates entries of all regions.
|
||||
aggregated.push(ctx.expected.clone());
|
||||
}
|
||||
aggregated.into_iter().flatten().collect()
|
||||
}
|
||||
|
||||
/// Starts a test with:
|
||||
/// * `test_name` - The name of the test.
|
||||
/// * `num_topics` - Number of topics to be created in the preparation phase.
|
||||
/// * `num_regions` - Number of regions involved in the test.
|
||||
/// * `num_appends` - Number of append operations to be performed.
|
||||
/// * `all` - All regions will be involved in an append operation if `all` is true. Otherwise,
|
||||
/// an append operation will only randomly choose a subset of regions.
|
||||
/// * `large` - Builds large entries for each region if `large` is true.
|
||||
async fn test_with(
|
||||
test_name: &str,
|
||||
num_topics: usize,
|
||||
num_regions: usize,
|
||||
num_appends: usize,
|
||||
all: bool,
|
||||
large: bool,
|
||||
) {
|
||||
let Ok(broker_endpoints) = std::env::var("GT_KAFKA_ENDPOINTS") else {
|
||||
warn!("The endpoints is empty, skipping the test 'test_append_batch_basic_large'");
|
||||
warn!("The endpoints is empty, skipping the test {test_name}");
|
||||
return;
|
||||
};
|
||||
let data_size_kb = rand::thread_rng().gen_range(9..31usize);
|
||||
info!("Entry size: {}Ki", data_size_kb);
|
||||
let broker_endpoints = broker_endpoints
|
||||
.split(',')
|
||||
.map(|s| s.trim().to_string())
|
||||
.collect::<Vec<_>>();
|
||||
let config = DatanodeKafkaConfig {
|
||||
broker_endpoints,
|
||||
max_batch_size: ReadableSize::kb(8),
|
||||
..Default::default()
|
||||
};
|
||||
let logstore = KafkaLogStore::try_new(&config).await.unwrap();
|
||||
let topic_name = uuid::Uuid::new_v4().to_string();
|
||||
let provider = Provider::kafka_provider(topic_name);
|
||||
let region_entries = (0..5)
|
||||
|
||||
let (logstore, topics) = prepare(test_name, num_topics, broker_endpoints).await;
|
||||
let mut region_contexts = (0..num_regions)
|
||||
.map(|i| {
|
||||
let region_id = RegionId::new(1, i);
|
||||
let topic = &topics[i % topics.len()];
|
||||
let ns = new_namespace(topic, i as u64);
|
||||
let entry_builder = EntryBuilder::new(ns.clone());
|
||||
(
|
||||
region_id,
|
||||
generate_entries(&logstore, &provider, 20, region_id, data_size_kb * 1024),
|
||||
i as u64,
|
||||
RegionContext {
|
||||
ns,
|
||||
entry_builder,
|
||||
expected: Vec::new(),
|
||||
flushed_entry_id: 0,
|
||||
},
|
||||
)
|
||||
})
|
||||
.collect::<HashMap<RegionId, Vec<_>>>();
|
||||
.collect();
|
||||
|
||||
let mut all_entries = region_entries
.values()
.flatten()
.cloned()
.collect::<Vec<_>>();
assert_matches!(all_entries[0], Entry::MultiplePart(_));
all_entries.shuffle(&mut rand::thread_rng());
for _ in 0..num_appends {
// Appends entries for a subset of regions.
let which = all_or_subset(all, num_regions);
let entries = build_entries(&mut region_contexts, &which, large);
let last_entry_ids = logstore.append_batch(entries).await.unwrap().last_entry_ids;

let response = logstore.append_batch(all_entries.clone()).await.unwrap();
// 5 region
assert_eq!(response.last_entry_ids.len(), 5);
let got_entries = logstore
.read(&provider, 0)
.await
.unwrap()
.try_collect::<Vec<_>>()
.await
.unwrap()
.into_iter()
.flatten()
.collect::<Vec<_>>();
for (region_id, _) in region_entries {
let expected_entries = all_entries
.iter()
.filter(|entry| entry.region_id() == region_id)
.cloned()
.collect::<Vec<_>>();
let mut actual_entries = got_entries
.iter()
.filter(|entry| entry.region_id() == region_id)
.cloned()
.collect::<Vec<_>>();
actual_entries
.iter_mut()
.for_each(|entry| entry.set_entry_id(0));
assert_eq!(expected_entries, actual_entries);
// Reads entries for regions and checks for each region that the gotten entries are identical with the expected ones.
for region_id in which {
let ctx = region_contexts.get_mut(&region_id).unwrap();
let stream = logstore
.read(&ctx.ns, ctx.flushed_entry_id + 1)
.await
.unwrap();
let mut got = stream
.collect::<Vec<_>>()
.await
.into_iter()
.flat_map(|x| x.unwrap())
.collect::<Vec<_>>();
//FIXME(weny): https://github.com/GreptimeTeam/greptimedb/issues/3152
ctx.expected.iter_mut().for_each(|entry| entry.id = 0);
got.iter_mut().for_each(|entry| entry.id = 0);
assert_eq!(ctx.expected, got);
}

// Simulates a flush for regions.
for (region_id, last_entry_id) in last_entry_ids {
let ctx = region_contexts.get_mut(&region_id).unwrap();
ctx.flushed_entry_id = last_entry_id;
}
}
}

/// Appends entries for one region and checks all entries can be read successfully.
#[tokio::test]
async fn test_one_region() {
test_with("test_one_region", 1, 1, 1, true, false).await;
}

/// Appends entries for multiple regions and checks entries for each region can be read successfully.
/// A topic is assigned only a single region.
#[tokio::test]
async fn test_multi_regions_disjoint() {
test_with("test_multi_regions_disjoint", 5, 5, 1, true, false).await;
}

/// Appends entries for multiple regions and checks entries for each region can be read successfully.
/// A topic is assigned multiple regions.
#[tokio::test]
async fn test_multi_regions_overlapped() {
test_with("test_multi_regions_overlapped", 5, 20, 1, true, false).await;
}

/// Appends entries for multiple regions and checks entries for each region can be read successfully.
/// A topic may be assigned multiple regions. The append operation repeats for several iterations.
/// Each append operation will only append entries for a subset of randomly chosen regions.
#[tokio::test]
async fn test_multi_appends() {
test_with("test_multi_appends", 5, 20, 3, false, false).await;
}

/// Appends large entries for multiple regions and checks entries for each region can be read successfully.
/// A topic may be assigned multiple regions.
#[tokio::test]
async fn test_append_large_entries() {
test_with("test_append_large_entries", 5, 20, 3, true, true).await;
}
}

@@ -13,14 +13,10 @@
// limitations under the License.

use std::collections::HashMap;
use std::sync::Arc;

use rskafka::record::Record as KafkaRecord;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::logstore::entry::{Entry, MultiplePartEntry, MultiplePartHeader, NaiveEntry};
use store_api::logstore::provider::{KafkaProvider, Provider};
use store_api::storage::RegionId;

use crate::error::{
DecodeJsonSnafu, EmptyEntriesSnafu, EncodeJsonSnafu, GetClientSnafu, IllegalSequenceSnafu,
@@ -28,7 +24,7 @@ use crate::error::{
};
use crate::kafka::client_manager::ClientManagerRef;
use crate::kafka::util::offset::Offset;
use crate::kafka::{EntryId, NamespaceImpl};
use crate::kafka::{EntryId, EntryImpl, NamespaceImpl};
use crate::metrics;

/// The current version of Record.
@@ -36,10 +32,7 @@ pub(crate) const VERSION: u32 = 0;

/// The estimated size in bytes of a serialized RecordMeta.
/// A record is guaranteed to have sizeof(meta) + sizeof(data) <= max_batch_size - ESTIMATED_META_SIZE.
pub(crate) const ESTIMATED_META_SIZE: usize = 256;

/// The minimum batch size
pub(crate) const MIN_BATCH_SIZE: usize = 4 * 1024;
const ESTIMATED_META_SIZE: usize = 256;

/// The type of a record.
///
@@ -117,25 +110,43 @@ impl TryFrom<KafkaRecord> for Record {
}
}

impl From<Vec<Record>> for EntryImpl {
fn from(records: Vec<Record>) -> Self {
let entry_id = records[0].meta.entry_id;
let ns = records[0].meta.ns.clone();
let data = records.into_iter().flat_map(|record| record.data).collect();
EntryImpl {
data,
id: entry_id,
ns,
}
}
}

/// Produces a record to a kafka topic.
pub(crate) struct RecordProducer {
/// The provider of the entries.
provider: Arc<KafkaProvider>,
/// The namespace of the entries.
ns: NamespaceImpl,
/// Entries are buffered before being built into a record.
entries: Vec<Entry>,
entries: Vec<EntryImpl>,
}

impl RecordProducer {
/// Creates a new producer for producing entries with the given namespace.
pub(crate) fn new(provider: Arc<KafkaProvider>) -> Self {
pub(crate) fn new(ns: NamespaceImpl) -> Self {
Self {
provider,
ns,
entries: Vec::new(),
}
}

/// Populates the entry buffer with the given entries.
pub(crate) fn with_entries(self, entries: Vec<EntryImpl>) -> Self {
Self { entries, ..self }
}

/// Pushes an entry into the entry buffer.
pub(crate) fn push(&mut self, entry: Entry) {
pub(crate) fn push(&mut self, entry: EntryImpl) {
self.entries.push(entry);
}

@@ -147,11 +158,11 @@ impl RecordProducer {

// Gets the producer in which a record buffer is maintained.
let producer = client_manager
.get_or_insert(&self.provider.topic)
.get_or_insert(&self.ns.topic)
.await
.map_err(|e| {
GetClientSnafu {
topic: &self.provider.topic,
topic: &self.ns.topic,
error: e.to_string(),
}
.build()
@@ -160,8 +171,10 @@ impl RecordProducer {

// Stores the offset of the last successfully produced record.
let mut last_offset = None;
let max_record_size =
client_manager.config.max_batch_size.as_bytes() as usize - ESTIMATED_META_SIZE;
for entry in self.entries {
for record in convert_to_records(entry) {
for record in build_records(entry, max_record_size) {
let kafka_record = KafkaRecord::try_from(record)?;

metrics::METRIC_KAFKA_PRODUCE_RECORD_COUNTS.inc();
@@ -174,8 +187,9 @@ impl RecordProducer {
.await
.map(Offset)
.with_context(|_| ProduceRecordSnafu {
topic: &self.provider.topic,
topic: &self.ns.topic,
size: kafka_record.approximate_size(),
limit: max_record_size,
})?;
last_offset = Some(offset);
}
@@ -185,188 +199,100 @@ impl RecordProducer {
}
}

fn convert_to_records(entry: Entry) -> Vec<Record> {
match entry {
Entry::Naive(entry) => vec![Record {
fn record_type(seq: usize, num_records: usize) -> RecordType {
if seq == 0 {
RecordType::First
} else if seq == num_records - 1 {
RecordType::Last
} else {
RecordType::Middle(seq)
}
}

fn build_records(entry: EntryImpl, max_record_size: usize) -> Vec<Record> {
if entry.data.len() <= max_record_size {
let record = Record {
meta: RecordMeta {
version: VERSION,
tp: RecordType::Full,
// TODO(weny): refactor the record meta.
entry_id: 0,
ns: NamespaceImpl {
region_id: entry.region_id.as_u64(),
// TODO(weny): refactor the record meta.
topic: String::new(),
},
entry_id: entry.id,
ns: entry.ns,
},
data: entry.data,
}],
Entry::MultiplePart(entry) => {
let mut entries = Vec::with_capacity(entry.parts.len());

for (idx, part) in entry.parts.into_iter().enumerate() {
let tp = match entry.headers[idx] {
MultiplePartHeader::First => RecordType::First,
MultiplePartHeader::Middle(i) => RecordType::Middle(i),
MultiplePartHeader::Last => RecordType::Last,
};
entries.push(Record {
meta: RecordMeta {
version: VERSION,
tp,
// TODO(weny): refactor the record meta.
entry_id: 0,
ns: NamespaceImpl {
region_id: entry.region_id.as_u64(),
topic: String::new(),
},
},
data: part,
})
}
entries
}
}
}

fn convert_to_naive_entry(provider: Arc<KafkaProvider>, record: Record) -> Entry {
let region_id = RegionId::from_u64(record.meta.ns.region_id);

Entry::Naive(NaiveEntry {
provider: Provider::Kafka(provider),
region_id,
// TODO(weny): should be the offset in the topic
entry_id: record.meta.entry_id,
data: record.data,
})
}

fn convert_to_multiple_entry(
provider: Arc<KafkaProvider>,
region_id: RegionId,
records: Vec<Record>,
) -> Entry {
let mut headers = Vec::with_capacity(records.len());
let mut parts = Vec::with_capacity(records.len());

for record in records {
let header = match record.meta.tp {
RecordType::Full => unreachable!(),
RecordType::First => MultiplePartHeader::First,
RecordType::Middle(i) => MultiplePartHeader::Middle(i),
RecordType::Last => MultiplePartHeader::Last,
};
headers.push(header);
parts.push(record.data);
return vec![record];
}

Entry::MultiplePart(MultiplePartEntry {
provider: Provider::Kafka(provider),
region_id,
// TODO(weny): should be the offset in the topic
entry_id: 0,
headers,
parts,
})
let chunks = entry.data.chunks(max_record_size);
let num_chunks = chunks.len();
chunks
.enumerate()
.map(|(i, chunk)| Record {
meta: RecordMeta {
version: VERSION,
tp: record_type(i, num_chunks),
entry_id: entry.id,
ns: entry.ns.clone(),
},
data: chunk.to_vec(),
})
.collect()
}

/// Constructs entries from `buffered_records`
pub fn remaining_entries(
provider: &Arc<KafkaProvider>,
buffered_records: &mut HashMap<RegionId, Vec<Record>>,
) -> Option<Vec<Entry>> {
if buffered_records.is_empty() {
None
} else {
let mut entries = Vec::with_capacity(buffered_records.len());
for (region_id, records) in buffered_records.drain() {
entries.push(convert_to_multiple_entry(
provider.clone(),
region_id,
records,
));
}
Some(entries)
}
}

/// For type of [Entry::Naive] Entry:
/// - Emits a [RecordType::Full] type record immediately.
///
/// For type of [Entry::MultiplePart] Entry:
/// - Emits a complete or incomplete [Entry] while the next same [RegionId] record arrives.
///
/// **Incomplete Entry:**
/// If the records arrive in the following order, it emits **the incomplete [Entry]** when the next record arrives.
/// - **[RecordType::First], [RecordType::Middle]**, [RecordType::First]
/// - **[RecordType::Middle]**, [RecordType::First]
/// - **[RecordType::Last]**
pub(crate) fn maybe_emit_entry(
provider: &Arc<KafkaProvider>,
pub fn maybe_emit_entry(
record: Record,
buffered_records: &mut HashMap<RegionId, Vec<Record>>,
) -> Result<Option<Entry>> {
entry_records: &mut HashMap<EntryId, Vec<Record>>,
) -> Result<Option<EntryImpl>> {
let mut entry = None;
match record.meta.tp {
RecordType::Full => entry = Some(convert_to_naive_entry(provider.clone(), record)),
RecordType::Full => {
entry = Some(EntryImpl::from(vec![record]));
}
RecordType::First => {
let region_id = record.meta.ns.region_id.into();
if let Some(records) = buffered_records.insert(region_id, vec![record]) {
// Incomplete entry
entry = Some(convert_to_multiple_entry(
provider.clone(),
region_id,
records,
))
}
ensure!(
!entry_records.contains_key(&record.meta.entry_id),
IllegalSequenceSnafu {
error: "First record must be the first"
}
);
entry_records.insert(record.meta.entry_id, vec![record]);
}
RecordType::Middle(seq) => {
let region_id = record.meta.ns.region_id.into();
let records = buffered_records.entry(region_id).or_default();
let prefix =
entry_records
.get_mut(&record.meta.entry_id)
.context(IllegalSequenceSnafu {
error: "Middle record must not be the first",
})?;
// Safety: the records are guaranteed not empty if the key exists.
let last_record = prefix.last().unwrap();
let legal = match last_record.meta.tp {
// Legal if this record follows a First record.
RecordType::First => seq == 1,
// Legal if this record follows a Middle record just prior to this record.
RecordType::Middle(last_seq) => last_seq + 1 == seq,
// Illegal sequence.
_ => false,
};
ensure!(
legal,
IllegalSequenceSnafu {
error: "Illegal prefix for a Middle record"
}
);

// Only validate complete entries.
if !records.is_empty() {
// Safety: the records are guaranteed not empty if the key exists.
let last_record = records.last().unwrap();
let legal = match last_record.meta.tp {
// Legal if this record follows a First record.
RecordType::First => seq == 1,
// Legal if this record follows a Middle record just prior to this record.
RecordType::Middle(last_seq) => last_seq + 1 == seq,
// Illegal sequence.
_ => false,
};
ensure!(
legal,
IllegalSequenceSnafu {
error: format!(
"Illegal sequence of a middle record, last record: {:?}, incoming record: {:?}",
last_record.meta.tp,
record.meta.tp
)
}
);
}

records.push(record);
prefix.push(record);
}
RecordType::Last => {
let region_id = record.meta.ns.region_id.into();
if let Some(mut records) = buffered_records.remove(&region_id) {
records.push(record);
entry = Some(convert_to_multiple_entry(
provider.clone(),
region_id,
records,
))
} else {
// Incomplete entry
entry = Some(convert_to_multiple_entry(
provider.clone(),
region_id,
vec![record],
))
}
// There must be a sequence prefix before a Last record is read.
let mut records =
entry_records
.remove(&record.meta.entry_id)
.context(IllegalSequenceSnafu {
error: "Missing prefix for a Last record",
})?;
records.push(record);
entry = Some(EntryImpl::from(records));
}
}
Ok(entry)
@@ -374,141 +300,278 @@ pub(crate) fn maybe_emit_entry(

#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::sync::Arc;

use super::*;
use crate::error;
use common_base::readable_size::ReadableSize;
use common_wal::config::kafka::DatanodeKafkaConfig;
use common_wal::test_util::run_test_with_kafka_wal;
use uuid::Uuid;

fn new_test_record(tp: RecordType, entry_id: EntryId, region_id: u64, data: Vec<u8>) -> Record {
Record {
meta: RecordMeta {
version: VERSION,
tp,
ns: NamespaceImpl {
region_id,
topic: "greptimedb_wal_topic".to_string(),
use super::*;
use crate::kafka::client_manager::ClientManager;

// Implements some utility methods for testing.
impl Default for Record {
fn default() -> Self {
Self {
meta: RecordMeta {
version: VERSION,
tp: RecordType::Full,
ns: NamespaceImpl {
region_id: 0,
topic: "greptimedb_wal_topic".to_string(),
},
entry_id: 0,
},
entry_id,
},
data,
data: Vec::new(),
}
}
}

impl Record {
/// Overrides tp.
fn with_tp(&self, tp: RecordType) -> Self {
Self {
meta: RecordMeta {
tp,
..self.meta.clone()
},
..self.clone()
}
}

/// Overrides data with the given data.
fn with_data(&self, data: &[u8]) -> Self {
Self {
data: data.to_vec(),
..self.clone()
}
}

/// Overrides entry id.
fn with_entry_id(&self, entry_id: EntryId) -> Self {
Self {
meta: RecordMeta {
entry_id,
..self.meta.clone()
},
..self.clone()
}
}

/// Overrides namespace.
fn with_ns(&self, ns: NamespaceImpl) -> Self {
Self {
meta: RecordMeta { ns, ..self.meta },
..self.clone()
}
}
}

fn new_test_entry<D: AsRef<[u8]>>(data: D, entry_id: EntryId, ns: NamespaceImpl) -> EntryImpl {
EntryImpl {
data: data.as_ref().to_vec(),
id: entry_id,
ns,
}
}

/// Tests that the `build_records` works as expected.
#[test]
fn test_maybe_emit_entry_emit_naive_entry() {
let provider = Arc::new(KafkaProvider::new("my_topic".to_string()));
let region_id = RegionId::new(1, 1);
let mut buffer = HashMap::new();
let record = new_test_record(RecordType::Full, 1, region_id.as_u64(), vec![1; 100]);
let entry = maybe_emit_entry(&provider, record, &mut buffer)
.unwrap()
.unwrap();
assert_eq!(
entry,
Entry::Naive(NaiveEntry {
provider: Provider::Kafka(provider),
region_id,
fn test_build_records() {
let max_record_size = 128;

// On a small entry.
let ns = NamespaceImpl {
region_id: 1,
topic: "greptimedb_wal_topic".to_string(),
};
let entry = new_test_entry([b'1'; 100], 0, ns.clone());
let records = build_records(entry.clone(), max_record_size);
assert!(records.len() == 1);
assert_eq!(entry.data, records[0].data);

// On a large entry.
let entry = new_test_entry([b'1'; 150], 0, ns.clone());
let records = build_records(entry.clone(), max_record_size);
assert!(records.len() == 2);
assert_eq!(&records[0].data, &[b'1'; 128]);
assert_eq!(&records[1].data, &[b'1'; 22]);

// On a way-too large entry.
let entry = new_test_entry([b'1'; 5000], 0, ns.clone());
let records = build_records(entry.clone(), max_record_size);
let matched = entry
.data
.chunks(max_record_size)
.enumerate()
.all(|(i, chunk)| records[i].data == chunk);
assert!(matched);
}

/// Tests that Record and KafkaRecord are able to be converted back and forth.
#[test]
fn test_record_conversion() {
let record = Record {
meta: RecordMeta {
version: VERSION,
tp: RecordType::Full,
entry_id: 1,
data: vec![1; 100]
})
ns: NamespaceImpl {
region_id: 1,
topic: "greptimedb_wal_topic".to_string(),
},
},
data: b"12345".to_vec(),
};
let kafka_record: KafkaRecord = record.clone().try_into().unwrap();
let got = Record::try_from(kafka_record).unwrap();
assert_eq!(record, got);
}

/// Tests that the reconstruction of an entry works as expected.
#[test]
fn test_reconstruct_entry() {
let template = Record::default();
let records = vec![
template.with_data(b"111").with_tp(RecordType::First),
template.with_data(b"222").with_tp(RecordType::Middle(1)),
template.with_data(b"333").with_tp(RecordType::Last),
];
let entry = EntryImpl::from(records.clone());
assert_eq!(records[0].meta.entry_id, entry.id);
assert_eq!(records[0].meta.ns, entry.ns);
assert_eq!(
entry.data,
records
.into_iter()
.flat_map(|record| record.data)
.collect::<Vec<_>>()
);
}

/// Tests that `maybe_emit_entry` works as expected.
/// This test does not check for illegal record sequences since they're already tested in the `test_check_records` test.
#[test]
fn test_maybe_emit_entry_emit_incomplete_entry() {
let provider = Arc::new(KafkaProvider::new("my_topic".to_string()));
let region_id = RegionId::new(1, 1);
// `First` overwrite `First`
let mut buffer = HashMap::new();
let record = new_test_record(RecordType::First, 1, region_id.as_u64(), vec![1; 100]);
assert!(maybe_emit_entry(&provider, record, &mut buffer)
.unwrap()
.is_none());
let record = new_test_record(RecordType::First, 2, region_id.as_u64(), vec![2; 100]);
let incomplete_entry = maybe_emit_entry(&provider, record, &mut buffer)
.unwrap()
.unwrap();
fn test_maybe_emit_entry() {
let ns = NamespaceImpl {
region_id: 1,
topic: "greptimedb_wal_topic".to_string(),
};
let template = Record::default().with_ns(ns);
let mut entry_records = HashMap::from([
(
1,
vec![template.with_entry_id(1).with_tp(RecordType::First)],
),
(
2,
vec![template.with_entry_id(2).with_tp(RecordType::First)],
),
(
3,
vec![
template.with_entry_id(3).with_tp(RecordType::First),
template.with_entry_id(3).with_tp(RecordType::Middle(1)),
],
),
]);

assert_eq!(
incomplete_entry,
Entry::MultiplePart(MultiplePartEntry {
provider: Provider::Kafka(provider.clone()),
region_id,
// TODO(weny): always be 0.
entry_id: 0,
headers: vec![MultiplePartHeader::First],
parts: vec![vec![1; 100]],
})
// A Full record arrives.
let got = maybe_emit_entry(
template.with_entry_id(0).with_tp(RecordType::Full),
&mut entry_records,
)
.unwrap();
assert!(got.is_some());

// A First record arrives with no prefix.
let got = maybe_emit_entry(
template.with_entry_id(0).with_tp(RecordType::First),
&mut entry_records,
)
.unwrap();
assert!(got.is_none());

// A First record arrives with some prefix.
let got = maybe_emit_entry(
template.with_entry_id(1).with_tp(RecordType::First),
&mut entry_records,
);
assert!(got.is_err());

// `Last` overwrite `None`
let mut buffer = HashMap::new();
let record = new_test_record(RecordType::Last, 1, region_id.as_u64(), vec![1; 100]);
let incomplete_entry = maybe_emit_entry(&provider, record, &mut buffer)
.unwrap()
.unwrap();
// A Middle record arrives with legal prefix (First).
let got = maybe_emit_entry(
template.with_entry_id(2).with_tp(RecordType::Middle(1)),
&mut entry_records,
)
.unwrap();
assert!(got.is_none());

assert_eq!(
incomplete_entry,
Entry::MultiplePart(MultiplePartEntry {
provider: Provider::Kafka(provider.clone()),
region_id,
// TODO(weny): always be 0.
entry_id: 0,
headers: vec![MultiplePartHeader::Last],
parts: vec![vec![1; 100]],
})
// A Middle record arrives with legal prefix (Middle).
let got = maybe_emit_entry(
template.with_entry_id(2).with_tp(RecordType::Middle(2)),
&mut entry_records,
)
.unwrap();
assert!(got.is_none());

// A Middle record arrives with illegal prefix.
let got = maybe_emit_entry(
template.with_entry_id(2).with_tp(RecordType::Middle(1)),
&mut entry_records,
);
assert!(got.is_err());

// `First` overwrite `Middle(0)`
let mut buffer = HashMap::new();
let record = new_test_record(RecordType::Middle(0), 1, region_id.as_u64(), vec![1; 100]);
assert!(maybe_emit_entry(&provider, record, &mut buffer)
.unwrap()
.is_none());
let record = new_test_record(RecordType::First, 2, region_id.as_u64(), vec![2; 100]);
let incomplete_entry = maybe_emit_entry(&provider, record, &mut buffer)
.unwrap()
.unwrap();

assert_eq!(
incomplete_entry,
Entry::MultiplePart(MultiplePartEntry {
provider: Provider::Kafka(provider),
region_id,
// TODO(weny): always be 0.
entry_id: 0,
headers: vec![MultiplePartHeader::Middle(0)],
parts: vec![vec![1; 100]],
})
// A Middle record arrives with no prefix.
let got = maybe_emit_entry(
template.with_entry_id(22).with_tp(RecordType::Middle(1)),
&mut entry_records,
);
assert!(got.is_err());

// A Last record arrives with no prefix.
let got = maybe_emit_entry(
template.with_entry_id(33).with_tp(RecordType::Last),
&mut entry_records,
);
assert!(got.is_err());

// A Last record arrives with legal prefix.
let got = maybe_emit_entry(
template.with_entry_id(3).with_tp(RecordType::Last),
&mut entry_records,
)
.unwrap();
assert!(got.is_some());

// Check state.
assert_eq!(entry_records.len(), 3);
assert_eq!(entry_records[&0].len(), 1);
assert_eq!(entry_records[&1].len(), 1);
assert_eq!(entry_records[&2].len(), 3);
}

#[test]
fn test_maybe_emit_entry_illegal_seq() {
let provider = Arc::new(KafkaProvider::new("my_topic".to_string()));
let region_id = RegionId::new(1, 1);
let mut buffer = HashMap::new();
let record = new_test_record(RecordType::First, 1, region_id.as_u64(), vec![1; 100]);
assert!(maybe_emit_entry(&provider, record, &mut buffer)
.unwrap()
.is_none());
let record = new_test_record(RecordType::Middle(2), 1, region_id.as_u64(), vec![2; 100]);
let err = maybe_emit_entry(&provider, record, &mut buffer).unwrap_err();
assert_matches!(err, error::Error::IllegalSequence { .. });

let mut buffer = HashMap::new();
let record = new_test_record(RecordType::First, 1, region_id.as_u64(), vec![1; 100]);
assert!(maybe_emit_entry(&provider, record, &mut buffer)
.unwrap()
.is_none());
let record = new_test_record(RecordType::Middle(1), 1, region_id.as_u64(), vec![2; 100]);
assert!(maybe_emit_entry(&provider, record, &mut buffer)
.unwrap()
.is_none());
let record = new_test_record(RecordType::Middle(3), 1, region_id.as_u64(), vec![2; 100]);
let err = maybe_emit_entry(&provider, record, &mut buffer).unwrap_err();
assert_matches!(err, error::Error::IllegalSequence { .. });
#[tokio::test]
async fn test_produce_large_entry() {
run_test_with_kafka_wal(|broker_endpoints| {
Box::pin(async {
let topic = format!("greptimedb_wal_topic_{}", Uuid::new_v4());
let ns = NamespaceImpl {
region_id: 1,
topic,
};
let entry = new_test_entry([b'1'; 2000000], 0, ns.clone());
let producer = RecordProducer::new(ns.clone()).with_entries(vec![entry]);
let config = DatanodeKafkaConfig {
broker_endpoints,
max_batch_size: ReadableSize::mb(1),
..Default::default()
};
let manager = Arc::new(ClientManager::try_new(&config).await.unwrap());
producer.produce(&manager).await.unwrap();
})
})
.await
}
}

@@ -14,10 +14,12 @@

#![feature(let_chains)]
#![feature(io_error_more)]
#![feature(assert_matches)]

pub mod error;
pub mod kafka;
pub mod metrics;
mod noop;
pub mod raft_engine;
pub mod test_util;

pub use noop::NoopLogStore;
