Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-10 15:22:56 +00:00)

Compare commits: v0.8.1 ... avoid-quer (11 commits)
| SHA1 |
|---|
| 1bfba48755 |
| 457998f0fe |
| b02c256157 |
| 45fee948e9 |
| ea49f8a5c4 |
| 43afea1a9d |
| fcfcf86385 |
| 26b112ab57 |
| 24612f62dd |
| 85a231850d |
| f024054ed3 |
.github/workflows/develop.yml (9 changed lines)

@@ -212,7 +212,14 @@ jobs:
           path: .
       - name: Unzip binaries
         run: tar -xvf ./bins.tar.gz
-      - name: Fuzz Test
+      - name: Build Fuzz Test
+        shell: bash
+        run: |
+          cd tests-fuzz &&
+          cargo install cargo-gc-bin &&
+          cargo gc &&
+          cd ..
+      - name: Run Fuzz Test
         uses: ./.github/actions/fuzz-test
         env:
           CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
Cargo.lock (generated, 257 changed lines)

@@ -83,7 +83,7 @@ dependencies = [
 "axum",
 "bytes",
 "cfg-if",
-"http 0.2.12",
+"http",
 "indexmap 1.9.3",
 "schemars",
 "serde",
@@ -764,9 +764,9 @@ dependencies = [
 "bytes",
 "futures-util",
 "headers",
-"http 0.2.12",
-"http-body 0.4.6",
-"hyper 0.14.28",
+"http",
+"http-body",
+"hyper",
 "itoa",
 "matchit",
 "memchr",
@@ -794,8 +794,8 @@ dependencies = [
 "async-trait",
 "bytes",
 "futures-util",
-"http 0.2.12",
-"http-body 0.4.6",
+"http",
+"http-body",
 "mime",
 "rustversion",
 "tower-layer",
@@ -1227,6 +1227,7 @@ dependencies = [
 "common-meta",
 "moka",
 "snafu 0.8.2",
+"substrait 0.8.1",
 ]

 [[package]]
@@ -1260,6 +1261,8 @@ dependencies = [
 "arrow-schema",
 "async-stream",
 "async-trait",
+"bytes",
+"cache",
 "catalog",
 "chrono",
 "common-catalog",
@@ -1851,7 +1854,7 @@ dependencies = [
 "common-telemetry",
 "common-test-util",
 "common-version",
-"hyper 0.14.28",
+"hyper",
 "reqwest",
 "serde",
 "tempfile",
@@ -1960,7 +1963,7 @@ dependencies = [
 "futures-util",
 "hex",
 "humantime-serde",
-"hyper 0.14.28",
+"hyper",
 "itertools 0.10.5",
 "lazy_static",
 "moka",
@@ -2026,6 +2029,7 @@ version = "0.8.1"
 dependencies = [
 "api",
 "async-trait",
+"bytes",
 "common-base",
 "common-error",
 "common-macro",
@@ -3197,7 +3201,6 @@ dependencies = [
 "session",
 "snafu 0.8.2",
 "store-api",
-"substrait 0.8.1",
 "table",
 "tokio",
 "toml 0.8.12",
@@ -3522,6 +3525,15 @@ version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
+
+[[package]]
+name = "encoding_rs"
+version = "0.8.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59"
+dependencies = [
+"cfg-if",
+]

 [[package]]
 name = "endian-type"
 version = "0.1.2"
@@ -3592,7 +3604,7 @@ name = "etcd-client"
 version = "0.12.4"
 source = "git+https://github.com/MichaelScofield/etcd-client.git?rev=4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b#4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b"
 dependencies = [
-"http 0.2.12",
+"http",
 "prost 0.12.4",
 "tokio",
 "tokio-stream",
@@ -4185,7 +4197,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=902f75fdd170c572e90b1f640161d90995f20218#902f75fdd170c572e90b1f640161d90995f20218"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=ae26136accd82fbdf8be540cd502f2e94951077e#ae26136accd82fbdf8be540cd502f2e94951077e"
 dependencies = [
 "prost 0.12.4",
 "serde",
@@ -4207,7 +4219,7 @@ dependencies = [
 "futures-core",
 "futures-sink",
 "futures-util",
-"http 0.2.12",
+"http",
 "indexmap 2.2.6",
 "slab",
 "tokio",
@@ -4291,7 +4303,7 @@ dependencies = [
 "base64 0.21.7",
 "bytes",
 "headers-core",
-"http 0.2.12",
+"http",
 "httpdate",
 "mime",
 "sha1",
@@ -4303,7 +4315,7 @@ version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429"
 dependencies = [
-"http 0.2.12",
+"http",
 ]

 [[package]]
@@ -4406,17 +4418,6 @@ dependencies = [
 "itoa",
 ]

-[[package]]
-name = "http"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
-dependencies = [
-"bytes",
-"fnv",
-"itoa",
-]
-
 [[package]]
 name = "http-body"
 version = "0.4.6"
@@ -4424,30 +4425,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
 dependencies = [
 "bytes",
-"http 0.2.12",
-"pin-project-lite",
-]
-
-[[package]]
-name = "http-body"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643"
-dependencies = [
-"bytes",
-"http 1.1.0",
-]
-
-[[package]]
-name = "http-body-util"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d"
-dependencies = [
-"bytes",
-"futures-core",
-"http 1.1.0",
-"http-body 1.0.0",
+"http",
 "pin-project-lite",
 ]

@@ -4604,8 +4582,8 @@ dependencies = [
 "futures-core",
 "futures-util",
 "h2",
-"http 0.2.12",
-"http-body 0.4.6",
+"http",
+"http-body",
 "httparse",
 "httpdate",
 "itoa",
@@ -4617,40 +4595,18 @@ dependencies = [
 "want",
 ]

-[[package]]
-name = "hyper"
-version = "1.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d"
-dependencies = [
-"bytes",
-"futures-channel",
-"futures-util",
-"http 1.1.0",
-"http-body 1.0.0",
-"httparse",
-"itoa",
-"pin-project-lite",
-"smallvec",
-"tokio",
-"want",
-]
-
 [[package]]
 name = "hyper-rustls"
-version = "0.26.0"
+version = "0.24.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c"
+checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590"
 dependencies = [
 "futures-util",
-"http 1.1.0",
-"hyper 1.3.1",
-"hyper-util",
-"rustls 0.22.4",
-"rustls-pki-types",
+"http",
+"hyper",
+"rustls 0.21.12",
 "tokio",
-"tokio-rustls 0.25.0",
-"tower-service",
+"tokio-rustls 0.24.1",
 ]

 [[package]]
@@ -4659,32 +4615,12 @@ version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1"
 dependencies = [
-"hyper 0.14.28",
+"hyper",
 "pin-project-lite",
 "tokio",
 "tokio-io-timeout",
 ]

-[[package]]
-name = "hyper-util"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa"
-dependencies = [
-"bytes",
-"futures-channel",
-"futures-util",
-"http 1.1.0",
-"http-body 1.0.0",
-"hyper 1.3.1",
-"pin-project-lite",
-"socket2 0.5.7",
-"tokio",
-"tower",
-"tower-service",
-"tracing",
-]
-
 [[package]]
 name = "iana-time-zone"
 version = "0.1.60"
@@ -5668,7 +5604,7 @@ dependencies = [
 "etcd-client",
 "futures",
 "h2",
-"http-body 0.4.6",
+"http-body",
 "humantime",
 "humantime-serde",
 "itertools 0.10.5",
@@ -6430,6 +6366,7 @@ name = "object-store"
 version = "0.8.1"
 dependencies = [
 "anyhow",
+"async-trait",
 "bytes",
 "common-telemetry",
 "common-test-util",
@@ -6478,21 +6415,20 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"

 [[package]]
 name = "opendal"
-version = "0.46.0"
+version = "0.45.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "328c4992328e8965e6a6ef102d38438b5fdc7d9b9107eda2377ba05379d9d544"
+checksum = "52c17c077f23fa2d2c25d9d22af98baa43b8bbe2ef0de80cf66339aa70401467"
 dependencies = [
 "anyhow",
 "async-trait",
 "backon",
-"base64 0.22.1",
+"base64 0.21.7",
 "bytes",
 "chrono",
-"crc32c",
 "flagset",
 "futures",
 "getrandom",
-"http 1.1.0",
+"http",
 "log",
 "md-5",
 "once_cell",
@@ -6580,7 +6516,7 @@ checksum = "f24cda83b20ed2433c68241f918d0f6fdec8b1d43b7a9590ab4420c5095ca930"
 dependencies = [
 "async-trait",
 "futures-core",
-"http 0.2.12",
+"http",
 "opentelemetry 0.21.0",
 "opentelemetry-proto 0.4.0",
 "opentelemetry-semantic-conventions",
@@ -6717,7 +6653,6 @@ dependencies = [
 "substrait 0.8.1",
 "table",
 "tokio",
-"tokio-util",
 "tonic 0.11.0",
 ]

@@ -7586,23 +7521,17 @@ name = "promql"
 version = "0.8.1"
 dependencies = [
 "ahash 0.8.11",
-"async-recursion",
 "async-trait",
 "bytemuck",
-"catalog",
-"common-catalog",
 "common-error",
 "common-macro",
-"common-query",
 "common-recordbatch",
 "common-telemetry",
 "datafusion 37.0.0",
 "datafusion-expr 37.0.0",
-"datafusion-functions 37.0.0",
 "datatypes",
 "futures",
 "greptime-proto",
-"itertools 0.10.5",
 "lazy_static",
 "prometheus",
 "promql-parser",
@@ -7610,7 +7539,6 @@ dependencies = [
 "query",
 "session",
 "snafu 0.8.2",
-"table",
 "tokio",
 ]

@@ -7918,6 +7846,7 @@ dependencies = [
 "async-recursion",
 "async-stream",
 "async-trait",
+"bytes",
 "catalog",
 "chrono",
 "common-base",
@@ -7930,11 +7859,13 @@ dependencies = [
 "common-plugins",
 "common-query",
 "common-recordbatch",
+"common-runtime",
 "common-telemetry",
 "common-time",
 "datafusion 37.0.0",
 "datafusion-common 37.0.0",
 "datafusion-expr 37.0.0",
+"datafusion-functions 37.0.0",
 "datafusion-optimizer 37.0.0",
 "datafusion-physical-expr 37.0.0",
 "datafusion-sql 37.0.0",
@@ -7944,6 +7875,7 @@ dependencies = [
 "futures-util",
 "greptime-proto",
 "humantime",
+"itertools 0.10.5",
 "lazy_static",
 "meter-core",
 "meter-macros",
@@ -7955,6 +7887,7 @@ dependencies = [
 "prometheus",
 "promql",
 "promql-parser",
+"prost 0.12.4",
 "rand",
 "regex",
 "session",
@@ -8263,20 +8196,20 @@ dependencies = [

 [[package]]
 name = "reqsign"
-version = "0.15.0"
+version = "0.14.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01edce6b6c31a16ebc7525ac58c747a6d78bbce33e76bbebd350d6bc25b23e06"
+checksum = "43e319d9de9ff4d941abf4ac718897118b0fe04577ea3f8e0f5788971784eef5"
 dependencies = [
 "anyhow",
 "async-trait",
-"base64 0.22.1",
+"base64 0.21.7",
 "chrono",
 "form_urlencoded",
 "getrandom",
 "hex",
 "hmac",
 "home",
-"http 1.1.0",
+"http",
 "jsonwebtoken",
 "log",
 "once_cell",
@@ -8285,7 +8218,7 @@ dependencies = [
 "rand",
 "reqwest",
 "rsa 0.9.6",
-"rust-ini 0.21.0",
+"rust-ini 0.20.0",
 "serde",
 "serde_json",
 "sha1",
@@ -8294,20 +8227,20 @@ dependencies = [

 [[package]]
 name = "reqwest"
-version = "0.12.4"
+version = "0.11.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10"
+checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62"
 dependencies = [
-"base64 0.22.1",
+"base64 0.21.7",
 "bytes",
+"encoding_rs",
 "futures-core",
 "futures-util",
-"http 1.1.0",
-"http-body 1.0.0",
-"http-body-util",
-"hyper 1.3.1",
+"h2",
+"http",
+"http-body",
+"hyper",
 "hyper-rustls",
-"hyper-util",
 "ipnet",
 "js-sys",
 "log",
@@ -8316,16 +8249,16 @@ dependencies = [
 "once_cell",
 "percent-encoding",
 "pin-project-lite",
-"rustls 0.22.4",
+"rustls 0.21.12",
 "rustls-native-certs",
-"rustls-pemfile 2.1.2",
-"rustls-pki-types",
+"rustls-pemfile 1.0.4",
 "serde",
 "serde_json",
 "serde_urlencoded",
 "sync_wrapper",
+"system-configuration",
 "tokio",
-"tokio-rustls 0.25.0",
+"tokio-rustls 0.24.1",
 "tokio-util",
 "tower-service",
 "url",
@@ -8333,8 +8266,7 @@ dependencies = [
 "wasm-bindgen-futures",
 "wasm-streams",
 "web-sys",
-"webpki-roots 0.26.1",
-"winreg 0.52.0",
+"winreg 0.50.0",
 ]

 [[package]]
@@ -8600,13 +8532,12 @@ dependencies = [

 [[package]]
 name = "rust-ini"
-version = "0.21.0"
+version = "0.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d625ed57d8f49af6cfa514c42e1a71fadcff60eb0b1c517ff82fe41aa025b41"
+checksum = "3e0698206bcb8882bf2a9ecb4c1e7785db57ff052297085a6efd4fe42302068a"
 dependencies = [
 "cfg-if",
 "ordered-multimap 0.7.3",
-"trim-in-place",
 ]

 [[package]]
@@ -8748,13 +8679,12 @@ dependencies = [

 [[package]]
 name = "rustls-native-certs"
-version = "0.7.0"
+version = "0.6.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792"
+checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00"
 dependencies = [
 "openssl-probe",
-"rustls-pemfile 2.1.2",
-"rustls-pki-types",
+"rustls-pemfile 1.0.4",
 "schannel",
 "security-framework",
 ]
@@ -9587,10 +9517,10 @@ dependencies = [
 "hashbrown 0.14.5",
 "headers",
 "hostname",
-"http 0.2.12",
-"http-body 0.4.6",
+"http",
+"http-body",
 "humantime-serde",
-"hyper 0.14.28",
+"hyper",
 "influxdb_line_protocol",
 "itertools 0.10.5",
 "lazy_static",
@@ -10370,9 +10300,7 @@ version = "0.8.1"
 dependencies = [
 "async-trait",
 "bytes",
-"catalog",
 "common-error",
-"common-function",
 "common-macro",
 "common-telemetry",
 "datafusion 37.0.0",
@@ -10382,7 +10310,6 @@ dependencies = [
 "datatypes",
 "promql",
 "prost 0.12.4",
-"session",
 "snafu 0.8.2",
 "substrait 0.17.1",
 "tokio",
@@ -10559,6 +10486,7 @@ dependencies = [
 name = "table"
 version = "0.8.1"
 dependencies = [
+"api",
 "async-trait",
 "chrono",
 "common-base",
@@ -11216,9 +11144,9 @@ dependencies = [
 "futures-core",
 "futures-util",
 "h2",
-"http 0.2.12",
-"http-body 0.4.6",
-"hyper 0.14.28",
+"http",
+"http-body",
+"hyper",
 "hyper-timeout",
 "percent-encoding",
 "pin-project",
@@ -11244,9 +11172,9 @@ dependencies = [
 "bytes",
 "flate2",
 "h2",
-"http 0.2.12",
-"http-body 0.4.6",
-"hyper 0.14.28",
+"http",
+"http-body",
+"hyper",
 "hyper-timeout",
 "percent-encoding",
 "pin-project",
@@ -11348,8 +11276,8 @@ dependencies = [
 "bytes",
 "futures-core",
 "futures-util",
-"http 0.2.12",
-"http-body 0.4.6",
+"http",
+"http-body",
 "http-range-header",
 "httpdate",
 "iri-string",
@@ -11592,12 +11520,6 @@ dependencies = [
 "tree-sitter",
 ]

-[[package]]
-name = "trim-in-place"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "343e926fc669bc8cde4fa3129ab681c63671bae288b1f1081ceee6d9d37904fc"
-
 [[package]]
 name = "triomphe"
 version = "0.1.11"
@@ -12263,15 +12185,6 @@ version = "0.25.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1"

-[[package]]
-name = "webpki-roots"
-version = "0.26.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3de34ae270483955a94f4b21bdaaeb83d508bb84a01435f393818edb0012009"
-dependencies = [
-"rustls-pki-types",
-]
-
 [[package]]
 name = "which"
 version = "4.4.2"
@@ -12638,9 +12551,9 @@ dependencies = [

 [[package]]
 name = "winreg"
-version = "0.52.0"
+version = "0.50.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5"
+checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
 dependencies = [
 "cfg-if",
 "windows-sys 0.48.0",
@@ -120,7 +120,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "902f75fdd170c572e90b1f640161d90995f20218" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ae26136accd82fbdf8be540cd502f2e94951077e" }
 humantime = "2.1"
 humantime-serde = "1.1"
 itertools = "0.10"
@@ -146,7 +146,7 @@ raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.8"
 regex = "1.8"
 regex-automata = { version = "0.4" }
-reqwest = { version = "0.12", default-features = false, features = [
+reqwest = { version = "0.11", default-features = false, features = [
     "json",
     "rustls-tls-native-roots",
     "stream",
src/cache/Cargo.toml (1 changed line)

@@ -11,3 +11,4 @@ common-macro.workspace = true
 common-meta.workspace = true
 moka.workspace = true
 snafu.workspace = true
+substrait.workspace = true
src/cache/src/lib.rs (15 changed lines)

@@ -20,7 +20,8 @@ use std::time::Duration;
 use catalog::kvbackend::new_table_cache;
 use common_meta::cache::{
     new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
-    new_table_route_cache, CacheRegistry, CacheRegistryBuilder, LayeredCacheRegistryBuilder,
+    new_table_route_cache, new_view_info_cache, CacheRegistry, CacheRegistryBuilder,
+    LayeredCacheRegistryBuilder,
 };
 use common_meta::kv_backend::KvBackendRef;
 use moka::future::CacheBuilder;
@@ -33,6 +34,7 @@ const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
 const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);

 pub const TABLE_INFO_CACHE_NAME: &str = "table_info_cache";
+pub const VIEW_INFO_CACHE_NAME: &str = "view_info_cache";
 pub const TABLE_NAME_CACHE_NAME: &str = "table_name_cache";
 pub const TABLE_CACHE_NAME: &str = "table_cache";
 pub const TABLE_FLOWNODE_SET_CACHE_NAME: &str = "table_flownode_set_cache";
@@ -82,11 +84,22 @@ pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegist
         cache,
         kv_backend.clone(),
     ));
+    // Builds the view info cache
+    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
+        .time_to_live(DEFAULT_CACHE_TTL)
+        .time_to_idle(DEFAULT_CACHE_TTI)
+        .build();
+    let view_info_cache = Arc::new(new_view_info_cache(
+        VIEW_INFO_CACHE_NAME.to_string(),
+        cache,
+        kv_backend.clone(),
+    ));

     CacheRegistryBuilder::default()
         .add_cache(table_info_cache)
         .add_cache(table_name_cache)
         .add_cache(table_route_cache)
         .add_cache(view_info_cache)
         .add_cache(table_flownode_set_cache)
         .build()
 }
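The hunk above registers a new view info cache next to the existing fundamental caches. As a minimal sketch of how a consumer could pull that cache back out of the registry, assuming the type-keyed `get()` the catalog manager uses later in this compare (the helper name and the exact `get()` signature are assumptions, not taken from the diff):

```rust
use cache::build_fundamental_cache_registry;
use common_meta::cache::ViewInfoCacheRef;
use common_meta::kv_backend::KvBackendRef;

// Hypothetical helper: fetch the view info cache registered by
// `build_fundamental_cache_registry`; `None` means it was never added.
fn view_info_cache(kv_backend: KvBackendRef) -> Option<ViewInfoCacheRef> {
    let registry = build_fundamental_cache_registry(kv_backend);
    // Type-keyed lookup; the annotation selects which cache is returned,
    // mirroring `self.cache_registry.get()` in the manager hunk below.
    let cache: Option<ViewInfoCacheRef> = registry.get();
    cache
}
```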
@@ -16,6 +16,7 @@ arrow.workspace = true
 arrow-schema.workspace = true
 async-stream.workspace = true
 async-trait = "0.1"
+bytes.workspace = true
 common-catalog.workspace = true
 common-config.workspace = true
 common-error.workspace = true
@@ -48,8 +49,11 @@ table.workspace = true
 tokio.workspace = true

 [dev-dependencies]
+cache.workspace = true
 catalog = { workspace = true, features = ["testing"] }
 chrono.workspace = true
+common-meta = { workspace = true, features = ["testing"] }
+common-query = { workspace = true, features = ["testing"] }
 common-test-util.workspace = true
 log-store.workspace = true
 object-store.workspace = true
@@ -19,10 +19,7 @@ use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_macro::stack_trace_debug;
 use datafusion::error::DataFusionError;
-use datatypes::prelude::ConcreteDataType;
 use snafu::{Location, Snafu};
-use table::metadata::TableId;
-use tokio::task::JoinError;

 #[derive(Snafu)]
 #[snafu(visibility(pub))]
@@ -65,19 +62,6 @@ pub enum Error {
         location: Location,
         source: BoxedError,
     },
-    #[snafu(display("Failed to open system catalog table"))]
-    OpenSystemCatalog {
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },
-
-    #[snafu(display("Failed to create system catalog table"))]
-    CreateSystemCatalog {
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },

     #[snafu(display("Failed to create table, table info: {}", table_info))]
     CreateTable {
@@ -94,52 +78,6 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display(
-        "System catalog table type mismatch, expected: binary, found: {:?}",
-        data_type,
-    ))]
-    SystemCatalogTypeMismatch {
-        data_type: ConcreteDataType,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
-    InvalidEntryType {
-        entry_type: Option<u8>,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Invalid system catalog key: {:?}", key))]
-    InvalidKey {
-        key: Option<String>,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Catalog value is not present"))]
-    EmptyValue {
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Failed to deserialize value"))]
-    ValueDeserialize {
-        #[snafu(source)]
-        error: serde_json::error::Error,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Table engine not found: {}", engine_name))]
-    TableEngineNotFound {
-        engine_name: String,
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },
-
     #[snafu(display("Cannot find catalog by name: {}", catalog_name))]
     CatalogNotFound {
         catalog_name: String,
@@ -169,44 +107,9 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display("Schema {} already exists", schema))]
-    SchemaExists {
-        schema: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Operation {} not implemented yet", operation))]
-    Unimplemented {
-        operation: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Operation {} not supported", op))]
-    NotSupported {
-        op: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Failed to open table {table_id}"))]
-    OpenTable {
-        table_id: TableId,
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },
-
-    #[snafu(display("Failed to open table in parallel"))]
-    ParallelOpenTable {
-        #[snafu(source)]
-        error: JoinError,
-    },
-
-    #[snafu(display("Table not found while opening table, table info: {}", table_info))]
-    TableNotFound {
-        table_info: String,
+    #[snafu(display("View info not found: {}", name))]
+    ViewInfoNotFound {
+        name: String,
         #[snafu(implicit)]
         location: Location,
     },
@@ -217,13 +120,6 @@ pub enum Error {
     #[snafu(display("Failed to find region routes"))]
     FindRegionRoutes { source: partition::error::Error },

-    #[snafu(display("Failed to read system catalog table records"))]
-    ReadSystemCatalog {
-        #[snafu(implicit)]
-        location: Location,
-        source: common_recordbatch::error::Error,
-    },
-
     #[snafu(display("Failed to create recordbatch"))]
     CreateRecordBatch {
         #[snafu(implicit)]
@@ -231,20 +127,6 @@ pub enum Error {
         source: common_recordbatch::error::Error,
     },

-    #[snafu(display("Failed to insert table creation record to system catalog"))]
-    InsertCatalogRecord {
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },
-
-    #[snafu(display("Failed to scan system catalog table"))]
-    SystemCatalogTableScan {
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },
-
     #[snafu(display("Internal error"))]
     Internal {
         #[snafu(implicit)]
@@ -258,20 +140,14 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display("Failed to execute system catalog table scan"))]
-    SystemCatalogTableScanExec {
+    #[snafu(display("Failed to decode logical plan for view: {}", name))]
+    DecodePlan {
+        name: String,
         #[snafu(implicit)]
         location: Location,
         source: common_query::error::Error,
     },

-    #[snafu(display("Cannot parse catalog value"))]
-    InvalidCatalogValue {
-        #[snafu(implicit)]
-        location: Location,
-        source: common_catalog::error::Error,
-    },
-
     #[snafu(display("Failed to perform metasrv operation"))]
     Metasrv {
         #[snafu(implicit)]
@@ -297,20 +173,6 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display("Table schema mismatch"))]
-    TableSchemaMismatch {
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },
-
-    #[snafu(display("A generic error has occurred, msg: {}", msg))]
-    Generic {
-        msg: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
     #[snafu(display("Table metadata manager error"))]
     TableMetadataManager {
         source: common_meta::error::Error,
@@ -324,6 +186,26 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
+
+    #[snafu(display("Failed to get view info from cache"))]
+    GetViewCache {
+        source: common_meta::error::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
+    #[snafu(display("Cache not found: {name}"))]
+    CacheNotFound {
+        name: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
+    #[snafu(display("Failed to cast the catalog manager"))]
+    CastManager {
+        #[snafu(implicit)]
+        location: Location,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -331,61 +213,43 @@ pub type Result<T> = std::result::Result<T, Error>;
 impl ErrorExt for Error {
     fn status_code(&self) -> StatusCode {
         match self {
-            Error::InvalidKey { .. }
-            | Error::SchemaNotFound { .. }
+            Error::SchemaNotFound { .. }
             | Error::CatalogNotFound { .. }
             | Error::FindPartitions { .. }
             | Error::FindRegionRoutes { .. }
-            | Error::InvalidEntryType { .. }
-            | Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
+            | Error::CacheNotFound { .. }
+            | Error::CastManager { .. } => StatusCode::Unexpected,

-            Error::TableNotFound { .. } => StatusCode::TableNotFound,
+            Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,

-            Error::SystemCatalog { .. }
-            | Error::EmptyValue { .. }
-            | Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
+            Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,

-            Error::Generic { .. }
-            | Error::SystemCatalogTypeMismatch { .. }
-            | Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
+            Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,

-            Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
-                source.status_code()
-            }
-            Error::InvalidCatalogValue { source, .. } => source.status_code(),
-
+            Error::CreateRecordBatch { source, .. } => source.status_code(),
             Error::TableExists { .. } => StatusCode::TableAlreadyExists,
             Error::TableNotExist { .. } => StatusCode::TableNotFound,
-            Error::SchemaExists { .. } | Error::TableEngineNotFound { .. } => {
-                StatusCode::InvalidArguments
-            }
-
             Error::ListCatalogs { source, .. }
             | Error::ListNodes { source, .. }
             | Error::ListSchemas { source, .. }
             | Error::ListTables { source, .. } => source.status_code(),

-            Error::OpenSystemCatalog { source, .. }
-            | Error::CreateSystemCatalog { source, .. }
-            | Error::InsertCatalogRecord { source, .. }
-            | Error::OpenTable { source, .. }
-            | Error::CreateTable { source, .. }
-            | Error::TableSchemaMismatch { source, .. } => source.status_code(),
+            Error::CreateTable { source, .. } => source.status_code(),

             Error::Metasrv { source, .. } => source.status_code(),
-            Error::SystemCatalogTableScan { source, .. } => source.status_code(),
-            Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
+            Error::DecodePlan { source, .. } => source.status_code(),
             Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),

             Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
                 source.status_code()
             }

-            Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
             Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
             Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
             Error::TableMetadataManager { source, .. } => source.status_code(),
-            Error::GetTableCache { .. } => StatusCode::Internal,
+            Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
+                source.status_code()
+            }
         }
     }
 }

@@ -417,11 +281,6 @@ mod tests {
             .status_code()
         );

-        assert_eq!(
-            StatusCode::Unexpected,
-            InvalidKeySnafu { key: None }.build().status_code()
-        );
-
         assert_eq!(
             StatusCode::StorageUnavailable,
             Error::SystemCatalog {
@@ -430,19 +289,6 @@ mod tests {
             }
             .status_code()
         );
-
-        assert_eq!(
-            StatusCode::Internal,
-            Error::SystemCatalogTypeMismatch {
-                data_type: ConcreteDataType::binary_datatype(),
-                location: Location::generate(),
-            }
-            .status_code()
-        );
-        assert_eq!(
-            StatusCode::StorageUnavailable,
-            EmptyValueSnafu {}.build().status_code()
-        );
     }

     #[test]
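The new `GetViewCache`, `CacheNotFound`, and `CastManager` variants are ordinary snafu context selectors. A minimal, hypothetical sketch of how they get raised inside this error module (the same `.context(...)` pattern the catalog manager hunks below use; the helper function is illustrative, not part of the diff):

```rust
use snafu::OptionExt;

// Illustrative only: convert a missing cache registry entry into
// Error::CacheNotFound, as KvBackendCatalogManager::view_info_cache()
// does further down in this compare.
fn require_cache<T>(cache: Option<T>, name: &str) -> Result<T> {
    cache.context(CacheNotFoundSnafu { name })
}
```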
@@ -22,14 +22,13 @@ use common_catalog::consts::{
 };
 use common_config::Mode;
 use common_error::ext::BoxedError;
-use common_meta::cache::TableRouteCacheRef;
+use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
 use common_meta::key::catalog_name::CatalogNameKey;
 use common_meta::key::schema_name::SchemaNameKey;
 use common_meta::key::table_info::TableInfoValue;
 use common_meta::key::table_name::TableNameKey;
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::KvBackendRef;
-use common_meta::table_name::TableName;
 use futures_util::stream::BoxStream;
 use futures_util::{StreamExt, TryStreamExt};
 use meta_client::client::MetaClient;
@@ -38,11 +37,12 @@ use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
 use snafu::prelude::*;
 use table::dist_table::DistTable;
 use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
+use table::table_name::TableName;
 use table::TableRef;

 use crate::error::{
-    GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu,
-    ListTablesSnafu, Result, TableMetadataManagerSnafu,
+    CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
+    ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
 };
 use crate::information_schema::InformationSchemaProvider;
 use crate::kvbackend::TableCacheRef;
@@ -61,25 +61,26 @@ pub struct KvBackendCatalogManager {
     table_metadata_manager: TableMetadataManagerRef,
     /// A sub-CatalogManager that handles system tables
     system_catalog: SystemCatalog,
-    table_cache: TableCacheRef,
+    cache_registry: LayeredCacheRegistryRef,
 }

 const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;

 impl KvBackendCatalogManager {
-    pub async fn new(
+    pub fn new(
         mode: Mode,
         meta_client: Option<Arc<MetaClient>>,
         backend: KvBackendRef,
-        table_cache: TableCacheRef,
-        table_route_cache: TableRouteCacheRef,
+        cache_registry: LayeredCacheRegistryRef,
     ) -> Arc<Self> {
         Arc::new_cyclic(|me| Self {
             mode,
             meta_client,
             partition_manager: Arc::new(PartitionRuleManager::new(
                 backend.clone(),
-                table_route_cache,
+                cache_registry
+                    .get()
+                    .expect("Failed to get table_route_cache"),
             )),
             table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
             system_catalog: SystemCatalog {
@@ -90,7 +91,7 @@ impl KvBackendCatalogManager {
                     me.clone(),
                 )),
             },
-            table_cache,
+            cache_registry,
         })
     }

@@ -99,6 +100,12 @@ impl KvBackendCatalogManager {
         &self.mode
     }

+    pub fn view_info_cache(&self) -> Result<ViewInfoCacheRef> {
+        self.cache_registry.get().context(CacheNotFoundSnafu {
+            name: "view_info_cache",
+        })
+    }
+
     /// Returns the `[MetaClient]`.
     pub fn meta_client(&self) -> Option<Arc<MetaClient>> {
         self.meta_client.clone()
@@ -215,7 +222,11 @@ impl CatalogManager for KvBackendCatalogManager {
             return Ok(Some(table));
         }

-        self.table_cache
+        let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
+            name: "table_cache",
+        })?;
+
+        table_cache
             .get_by_ref(&TableName {
                 catalog_name: catalog_name.to_string(),
                 schema_name: schema_name.to_string(),
@@ -17,11 +17,11 @@ use std::sync::Arc;
 use common_meta::cache::{CacheContainer, Initializer, TableInfoCacheRef, TableNameCacheRef};
 use common_meta::error::{Result as MetaResult, ValueNotExistSnafu};
 use common_meta::instruction::CacheIdent;
-use common_meta::table_name::TableName;
 use futures::future::BoxFuture;
 use moka::future::Cache;
 use snafu::OptionExt;
 use table::dist_table::DistTable;
+use table::table_name::TableName;
 use table::TableRef;

 pub type TableCacheRef = Arc<TableCache>;
@@ -15,15 +15,25 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use bytes::Bytes;
|
||||||
use common_catalog::format_full_table_name;
|
use common_catalog::format_full_table_name;
|
||||||
|
use common_query::logical_plan::SubstraitPlanDecoderRef;
|
||||||
use datafusion::common::{ResolvedTableReference, TableReference};
|
use datafusion::common::{ResolvedTableReference, TableReference};
|
||||||
use datafusion::datasource::provider_as_source;
|
use datafusion::datasource::view::ViewTable;
|
||||||
|
use datafusion::datasource::{provider_as_source, TableProvider};
|
||||||
use datafusion::logical_expr::TableSource;
|
use datafusion::logical_expr::TableSource;
|
||||||
use session::context::QueryContext;
|
use session::context::QueryContext;
|
||||||
use snafu::{ensure, OptionExt};
|
use snafu::{ensure, OptionExt, ResultExt};
|
||||||
|
use table::metadata::TableType;
|
||||||
use table::table::adapter::DfTableProviderAdapter;
|
use table::table::adapter::DfTableProviderAdapter;
|
||||||
|
mod dummy_catalog;
|
||||||
|
use dummy_catalog::DummyCatalogList;
|
||||||
|
|
||||||
use crate::error::{QueryAccessDeniedSnafu, Result, TableNotExistSnafu};
|
use crate::error::{
|
||||||
|
CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, QueryAccessDeniedSnafu,
|
||||||
|
Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
|
||||||
|
};
|
||||||
|
use crate::kvbackend::KvBackendCatalogManager;
|
||||||
use crate::CatalogManagerRef;
|
use crate::CatalogManagerRef;
|
||||||
|
|
||||||
pub struct DfTableSourceProvider {
|
pub struct DfTableSourceProvider {
|
||||||
@@ -32,6 +42,7 @@ pub struct DfTableSourceProvider {
|
|||||||
disallow_cross_catalog_query: bool,
|
disallow_cross_catalog_query: bool,
|
||||||
default_catalog: String,
|
default_catalog: String,
|
||||||
default_schema: String,
|
default_schema: String,
|
||||||
|
plan_decoder: SubstraitPlanDecoderRef,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DfTableSourceProvider {
|
impl DfTableSourceProvider {
|
||||||
@@ -39,6 +50,7 @@ impl DfTableSourceProvider {
|
|||||||
catalog_manager: CatalogManagerRef,
|
catalog_manager: CatalogManagerRef,
|
||||||
disallow_cross_catalog_query: bool,
|
disallow_cross_catalog_query: bool,
|
||||||
query_ctx: &QueryContext,
|
query_ctx: &QueryContext,
|
||||||
|
plan_decoder: SubstraitPlanDecoderRef,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
Self {
|
Self {
|
||||||
catalog_manager,
|
catalog_manager,
|
||||||
@@ -46,6 +58,7 @@ impl DfTableSourceProvider {
|
|||||||
resolved_tables: HashMap::new(),
|
resolved_tables: HashMap::new(),
|
||||||
default_catalog: query_ctx.current_catalog().to_owned(),
|
default_catalog: query_ctx.current_catalog().to_owned(),
|
||||||
default_schema: query_ctx.current_schema().to_owned(),
|
default_schema: query_ctx.current_schema().to_owned(),
|
||||||
|
plan_decoder,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -94,8 +107,39 @@ impl DfTableSourceProvider {
                table: format_full_table_name(catalog_name, schema_name, table_name),
            })?;

        let provider = DfTableProviderAdapter::new(table);
        let source = provider_as_source(Arc::new(provider));
        let provider: Arc<dyn TableProvider> = if table.table_info().table_type == TableType::View {
            let catalog_manager = self
                .catalog_manager
                .as_any()
                .downcast_ref::<KvBackendCatalogManager>()
                .context(CastManagerSnafu)?;

            let view_info = catalog_manager
                .view_info_cache()?
                .get(table.table_info().ident.table_id)
                .await
                .context(GetViewCacheSnafu)?
                .context(ViewInfoNotFoundSnafu {
                    name: &table.table_info().name,
                })?;

            // Build the catalog list provider for deserialization.
            let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
            let logical_plan = self
                .plan_decoder
                .decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
                .await
                .context(DecodePlanSnafu {
                    name: &table.table_info().name,
                })?;

            Arc::new(ViewTable::try_new(logical_plan, None).context(DatafusionSnafu)?)
        } else {
            Arc::new(DfTableProviderAdapter::new(table))
        };

        let source = provider_as_source(provider);

        let _ = self.resolved_tables.insert(resolved_name, source.clone());
        Ok(source)
    }
@@ -103,6 +147,7 @@ impl DfTableSourceProvider {

#[cfg(test)]
mod tests {
    use common_query::test_util::DummyDecoder;
    use session::context::QueryContext;

    use super::*;
@@ -112,8 +157,12 @@ mod tests {
    fn test_validate_table_ref() {
        let query_ctx = &QueryContext::with("greptime", "public");

        let table_provider =
            DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);
        let table_provider = DfTableSourceProvider::new(
            MemoryCatalogManager::with_default_setup(),
            true,
            query_ctx,
            DummyDecoder::arc(),
        );

        let table_ref = TableReference::bare("table_name");
        let result = table_provider.resolve_table_ref(table_ref);
@@ -148,4 +197,99 @@ mod tests {
        let table_ref = TableReference::full("greptime", "greptime_private", "columns");
        assert!(table_provider.resolve_table_ref(table_ref).is_ok());
    }

    use std::collections::HashSet;

    use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
    use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
    use common_config::Mode;
    use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
    use common_meta::key::TableMetadataManager;
    use common_meta::kv_backend::memory::MemoryKvBackend;
    use common_query::error::Result as QueryResult;
    use common_query::logical_plan::SubstraitPlanDecoder;
    use datafusion::catalog::CatalogProviderList;
    use datafusion::logical_expr::builder::LogicalTableSource;
    use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder};

    struct MockDecoder;
    impl MockDecoder {
        pub fn arc() -> Arc<Self> {
            Arc::new(MockDecoder)
        }
    }

    #[async_trait::async_trait]
    impl SubstraitPlanDecoder for MockDecoder {
        async fn decode(
            &self,
            _message: bytes::Bytes,
            _catalog_list: Arc<dyn CatalogProviderList>,
            _optimize: bool,
        ) -> QueryResult<LogicalPlan> {
            Ok(mock_plan())
        }
    }

    fn mock_plan() -> LogicalPlan {
        let schema = Schema::new(vec![
            Field::new("id", DataType::Int32, true),
            Field::new("name", DataType::Utf8, true),
        ]);
        let table_source = LogicalTableSource::new(SchemaRef::new(schema));

        let projection = None;

        let builder =
            LogicalPlanBuilder::scan("person", Arc::new(table_source), projection).unwrap();

        builder
            .filter(col("id").gt(lit(500)))
            .unwrap()
            .build()
            .unwrap()
    }

    #[tokio::test]
    async fn test_resolve_view() {
        let query_ctx = &QueryContext::with("greptime", "public");
        let backend = Arc::new(MemoryKvBackend::default());
        let layered_cache_builder = LayeredCacheRegistryBuilder::default()
            .add_cache_registry(CacheRegistryBuilder::default().build());
        let fundamental_cache_registry = build_fundamental_cache_registry(backend.clone());
        let layered_cache_registry = Arc::new(
            with_default_composite_cache_registry(
                layered_cache_builder.add_cache_registry(fundamental_cache_registry),
            )
            .unwrap()
            .build(),
        );

        let catalog_manager = KvBackendCatalogManager::new(
            Mode::Standalone,
            None,
            backend.clone(),
            layered_cache_registry,
        );
        let table_metadata_manager = TableMetadataManager::new(backend);
        let mut view_info = common_meta::key::test_utils::new_test_table_info(1024, vec![]);
        view_info.table_type = TableType::View;
        let logical_plan = vec![1, 2, 3];
        // Create view metadata
        table_metadata_manager
            .create_view_metadata(view_info.clone().into(), logical_plan, HashSet::new())
            .await
            .unwrap();

        let mut table_provider =
            DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc());

        // View not found
        let table_ref = TableReference::bare("not_exists_view");
        assert!(table_provider.resolve_table(table_ref).await.is_err());

        let table_ref = TableReference::bare(view_info.name);
        let source = table_provider.resolve_table(table_ref).await.unwrap();
        assert_eq!(*source.get_logical_plan().unwrap(), mock_plan());
    }
}
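
The hunks above change how a table reference becomes a TableSource: when the resolved table is a view, its stored Substrait plan is decoded back into a LogicalPlan through the injected plan decoder. A minimal sketch of calling the new constructor, using only names that appear in this diff (DummyDecoder is the test decoder from common_query::test_util); it is an illustration, not code from this change:

    // Sketch only: build a provider with the new five-argument signature and
    // resolve a table (or view) into a DataFusion TableSource.
    let mut provider = DfTableSourceProvider::new(
        catalog_manager.clone(), // CatalogManagerRef
        true,                    // disallow cross-catalog queries
        query_ctx,               // &QueryContext
        DummyDecoder::arc(),     // SubstraitPlanDecoderRef
    );
    let source = provider
        .resolve_table(TableReference::bare("my_view"))
        .await?;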
129 src/catalog/src/table_source/dummy_catalog.rs Normal file
@@ -0,0 +1,129 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Dummy catalog for region server.

use std::any::Any;
use std::sync::Arc;

use async_trait::async_trait;
use common_catalog::format_full_table_name;
use datafusion::catalog::schema::SchemaProvider;
use datafusion::catalog::{CatalogProvider, CatalogProviderList};
use datafusion::datasource::TableProvider;
use snafu::OptionExt;
use table::table::adapter::DfTableProviderAdapter;

use crate::error::TableNotExistSnafu;
use crate::CatalogManagerRef;

/// Delegate the resolving requests to the `[CatalogManager]` unconditionally.
#[derive(Clone)]
pub struct DummyCatalogList {
    catalog_manager: CatalogManagerRef,
}

impl DummyCatalogList {
    /// Creates a new catalog list with the given catalog manager.
    pub fn new(catalog_manager: CatalogManagerRef) -> Self {
        Self { catalog_manager }
    }
}

impl CatalogProviderList for DummyCatalogList {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn register_catalog(
        &self,
        _name: String,
        _catalog: Arc<dyn CatalogProvider>,
    ) -> Option<Arc<dyn CatalogProvider>> {
        None
    }

    fn catalog_names(&self) -> Vec<String> {
        vec![]
    }

    fn catalog(&self, catalog_name: &str) -> Option<Arc<dyn CatalogProvider>> {
        Some(Arc::new(DummyCatalogProvider {
            catalog_name: catalog_name.to_string(),
            catalog_manager: self.catalog_manager.clone(),
        }))
    }
}

/// A dummy catalog provider for [DummyCatalogList].
#[derive(Clone)]
struct DummyCatalogProvider {
    catalog_name: String,
    catalog_manager: CatalogManagerRef,
}

impl CatalogProvider for DummyCatalogProvider {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn schema_names(&self) -> Vec<String> {
        vec![]
    }

    fn schema(&self, schema_name: &str) -> Option<Arc<dyn SchemaProvider>> {
        Some(Arc::new(DummySchemaProvider {
            catalog_name: self.catalog_name.clone(),
            schema_name: schema_name.to_string(),
            catalog_manager: self.catalog_manager.clone(),
        }))
    }
}

/// A dummy schema provider for [DummyCatalogList].
#[derive(Clone)]
struct DummySchemaProvider {
    catalog_name: String,
    schema_name: String,
    catalog_manager: CatalogManagerRef,
}

#[async_trait]
impl SchemaProvider for DummySchemaProvider {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn table_names(&self) -> Vec<String> {
        vec![]
    }

    async fn table(&self, name: &str) -> datafusion::error::Result<Option<Arc<dyn TableProvider>>> {
        let table = self
            .catalog_manager
            .table(&self.catalog_name, &self.schema_name, name)
            .await?
            .with_context(|| TableNotExistSnafu {
                table: format_full_table_name(&self.catalog_name, &self.schema_name, name),
            })?;

        let table_provider: Arc<dyn TableProvider> = Arc::new(DfTableProviderAdapter::new(table));

        Ok(Some(table_provider))
    }

    fn table_exist(&self, _name: &str) -> bool {
        true
    }
}
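
The new dummy_catalog module exists so that plan decoding can look tables up lazily: every catalog, schema, and table request is forwarded to the CatalogManager instead of enumerating catalogs up front, which is why the *_names() methods return empty lists. A rough usage sketch, mirroring the resolve_table hunk above (names taken from this diff, surrounding code elided):

    // Sketch: wrap the catalog manager and hand it to the Substrait decoder
    // so the decoded view plan resolves tables through the live catalog.
    let catalog_list = Arc::new(DummyCatalogList::new(catalog_manager.clone()));
    let logical_plan = plan_decoder
        .decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
        .await?;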
@@ -173,14 +173,14 @@ impl Client {
        Ok(FlightClient { addr, client })
    }

    pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
    pub(crate) fn raw_region_client(&self) -> Result<(String, PbRegionClient<Channel>)> {
        let (_, channel) = self.find_channel()?;
        let (addr, channel) = self.find_channel()?;
        let client = PbRegionClient::new(channel)
            .max_decoding_message_size(self.max_grpc_recv_message_size())
            .max_encoding_message_size(self.max_grpc_send_message_size())
            .accept_compressed(CompressionEncoding::Zstd)
            .send_compressed(CompressionEncoding::Zstd);
        Ok(client)
        Ok((addr, client))
    }

    pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
@@ -89,8 +89,9 @@ pub enum Error {
        source: common_grpc::error::Error,
    },

    #[snafu(display("Failed to request RegionServer, code: {}", code))]
    #[snafu(display("Failed to request RegionServer {}, code: {}", addr, code))]
    RegionServer {
        addr: String,
        code: Code,
        source: BoxedError,
        #[snafu(implicit)]
@@ -177,7 +177,7 @@ impl RegionRequester {
            .with_label_values(&[request_type.as_str()])
            .start_timer();

        let mut client = self.client.raw_region_client()?;
        let (addr, mut client) = self.client.raw_region_client()?;

        let response = client
            .handle(request)
@@ -187,6 +187,7 @@ impl RegionRequester {
                let err: error::Error = e.into();
                // Uses `Error::RegionServer` instead of `Error::Server`
                error::Error::RegionServer {
                    addr,
                    code,
                    source: BoxedError::new(err),
                    location: location!(),
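
Taken together, the three client-side hunks above mean a failed region call can now be attributed to a concrete peer. A rough sketch of what the updated variant produces (the address, code, and the `inner` error below are illustrative placeholders, not values from this change):

    // Display output of the updated RegionServer error, approximately:
    //   Failed to request RegionServer 127.0.0.1:4001, code: Unavailable
    let err = error::Error::RegionServer {
        addr: "127.0.0.1:4001".to_string(),
        code: Code::Unavailable,
        source: BoxedError::new(inner), // `inner` stands in for the converted gRPC error
        location: location!(),
    };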
@@ -23,9 +23,6 @@ mod helper;
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
#[allow(unused)]
mod repl;
// TODO(tisonkun): migrate deprecated methods
#[allow(deprecated)]
mod upgrade;

use async_trait::async_trait;
use bench::BenchTableMetadataCommand;
@@ -33,7 +30,6 @@ use clap::Parser;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
pub use repl::Repl;
use tracing_appender::non_blocking::WorkerGuard;
use upgrade::UpgradeCommand;

use self::export::ExportCommand;
use crate::error::Result;
@@ -116,7 +112,6 @@ impl Command {
#[derive(Parser)]
enum SubCommand {
    // Attach(AttachCommand),
    Upgrade(UpgradeCommand),
    Bench(BenchTableMetadataCommand),
    Export(ExportCommand),
}
@@ -125,7 +120,6 @@ impl SubCommand {
    async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
        match self {
            // SubCommand::Attach(cmd) => cmd.build().await,
            SubCommand::Upgrade(cmd) => cmd.build(guard).await,
            SubCommand::Bench(cmd) => cmd.build(guard).await,
            SubCommand::Export(cmd) => cmd.build(guard).await,
        }
@@ -23,13 +23,13 @@ use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
use common_meta::table_name::TableName;
use common_telemetry::info;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema};
use rand::Rng;
use store_api::storage::RegionNumber;
use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};
use table::table_name::TableName;
use tracing_appender::non_blocking::WorkerGuard;

use self::metadata::TableMetadataBencher;
@@ -16,7 +16,7 @@ use std::time::Instant;

use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManagerRef;
use common_meta::table_name::TableName;
use table::table_name::TableName;

use crate::cli::bench::{
    bench_self_recorded, create_region_routes, create_region_wal_options, create_table_info,
@@ -37,7 +37,7 @@ use query::datafusion::DatafusionQueryEngine;
use query::logical_optimizer::LogicalOptimizer;
use query::parser::QueryLanguageParser;
use query::plan::LogicalPlan;
use query::query_engine::QueryEngineState;
use query::query_engine::{DefaultSerializer, QueryEngineState};
use query::QueryEngine;
use rustyline::error::ReadlineError;
use rustyline::Editor;
@@ -185,7 +185,7 @@ impl Repl {
            .context(PlanStatementSnafu)?;

        let plan = DFLogicalSubstraitConvertor {}
            .encode(&plan)
            .encode(&plan, DefaultSerializer)
            .context(SubstraitEncodeLogicalPlanSnafu)?;

        self.database.logical_plan(plan.to_vec()).await
@@ -277,24 +277,12 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
            .build(),
    );

    let table_cache = layered_cache_registry
        .get()
        .context(error::CacheRequiredSnafu {
            name: TABLE_CACHE_NAME,
        })?;
    let table_route_cache = layered_cache_registry
        .get()
        .context(error::CacheRequiredSnafu {
            name: TABLE_ROUTE_CACHE_NAME,
        })?;
    let catalog_manager = KvBackendCatalogManager::new(
        Mode::Distributed,
        Some(meta_client.clone()),
        cached_meta_backend.clone(),
        table_cache,
        table_route_cache,
    )
    .await;
        layered_cache_registry,
    );
    let plugins: Plugins = Default::default();
    let state = Arc::new(QueryEngineState::new(
        catalog_manager,
@@ -1,584 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::sync::Arc;

use async_trait::async_trait;
use clap::Parser;
use client::api::v1::meta::TableRouteValue;
use common_meta::ddl::utils::region_storage_path;
use common_meta::error as MetaError;
use common_meta::key::catalog_name::{CatalogNameKey, CatalogNameValue};
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue, RegionInfo};
use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue};
use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
use common_meta::key::{MetaKey, RegionDistribution, TableMetaValue};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::KvBackendRef;
use common_meta::range_stream::PaginationStream;
use common_meta::rpc::router::TableRoute;
use common_meta::rpc::store::{BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest};
use common_meta::rpc::KeyValue;
use common_meta::util::get_prefix_end_key;
use common_telemetry::info;
use etcd_client::Client;
use futures::TryStreamExt;
use prost::Message;
use snafu::ResultExt;
use tracing_appender::non_blocking::WorkerGuard;
use v1_helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue};

use crate::cli::{Instance, Tool};
use crate::error::{self, ConnectEtcdSnafu, Result};

#[derive(Debug, Default, Parser)]
pub struct UpgradeCommand {
    #[clap(long)]
    etcd_addr: String,
    #[clap(long)]
    dryrun: bool,

    #[clap(long)]
    skip_table_global_keys: bool,
    #[clap(long)]
    skip_catalog_keys: bool,
    #[clap(long)]
    skip_schema_keys: bool,
    #[clap(long)]
    skip_table_route_keys: bool,
}

impl UpgradeCommand {
    pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
        let client = Client::connect([&self.etcd_addr], None)
            .await
            .context(ConnectEtcdSnafu {
                etcd_addr: &self.etcd_addr,
            })?;
        let tool = MigrateTableMetadata {
            etcd_store: EtcdStore::with_etcd_client(client, 128),
            dryrun: self.dryrun,
            skip_catalog_keys: self.skip_catalog_keys,
            skip_table_global_keys: self.skip_table_global_keys,
            skip_schema_keys: self.skip_schema_keys,
            skip_table_route_keys: self.skip_table_route_keys,
        };
        Ok(Instance::new(Box::new(tool), guard))
    }
}

struct MigrateTableMetadata {
    etcd_store: KvBackendRef,
    dryrun: bool,

    skip_table_global_keys: bool,

    skip_catalog_keys: bool,

    skip_schema_keys: bool,

    skip_table_route_keys: bool,
}

#[async_trait]
impl Tool for MigrateTableMetadata {
    // migrates database's metadata from 0.3 to 0.4.
    async fn do_work(&self) -> Result<()> {
        if !self.skip_table_global_keys {
            self.migrate_table_global_values().await?;
        }
        if !self.skip_catalog_keys {
            self.migrate_catalog_keys().await?;
        }
        if !self.skip_schema_keys {
            self.migrate_schema_keys().await?;
        }
        if !self.skip_table_route_keys {
            self.migrate_table_route_keys().await?;
        }
        Ok(())
    }
}

const PAGE_SIZE: usize = 1000;

impl MigrateTableMetadata {
    async fn migrate_table_route_keys(&self) -> Result<()> {
        let key = b"__meta_table_route".to_vec();
        let range_end = get_prefix_end_key(&key);
        let mut keys = Vec::new();
        info!("Start scanning key from: {}", String::from_utf8_lossy(&key));

        let mut stream = PaginationStream::new(
            self.etcd_store.clone(),
            RangeRequest::new().with_range(key, range_end),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
                let value =
                    TableRouteValue::decode(&kv.value[..]).context(MetaError::DecodeProtoSnafu)?;
                Ok((kv.key, value))
            }),
        );

        while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? {
            let table_id = self.migrate_table_route_key(value).await?;
            keys.push(key);
            keys.push(TableRegionKey::new(table_id).to_bytes())
        }

        info!("Total migrated TableRouteKeys: {}", keys.len() / 2);
        self.delete_migrated_keys(keys).await;

        Ok(())
    }

    async fn migrate_table_route_key(&self, value: TableRouteValue) -> Result<u32> {
        let table_route = TableRoute::try_from_raw(
            &value.peers,
            value.table_route.expect("expected table_route"),
        )
        .unwrap();

        let new_table_value = NextTableRouteValue::physical(table_route.region_routes);

        let table_id = table_route.table.id as u32;
        let new_key = TableRouteKey::new(table_id);
        info!("Creating '{new_key}'");

        if self.dryrun {
            info!("Dryrun: do nothing");
        } else {
            self.etcd_store
                .put(
                    PutRequest::new()
                        .with_key(new_key.to_bytes())
                        .with_value(new_table_value.try_as_raw_value().unwrap()),
                )
                .await
                .unwrap();
        }

        Ok(table_id)
    }

    async fn migrate_schema_keys(&self) -> Result<()> {
        // The schema key prefix.
        let key = b"__s".to_vec();
        let range_end = get_prefix_end_key(&key);

        let mut keys = Vec::new();
        info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
        let mut stream = PaginationStream::new(
            self.etcd_store.clone(),
            RangeRequest::new().with_range(key, range_end),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
                let key_str =
                    std::str::from_utf8(&kv.key).context(MetaError::ConvertRawKeySnafu)?;
                let key = v1SchemaKey::parse(key_str)
                    .unwrap_or_else(|e| panic!("schema key is corrupted: {e}, key: {key_str}"));

                Ok(key)
            }),
        );
        while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
            let _ = self.migrate_schema_key(&key).await;
            keys.push(key.to_string().as_bytes().to_vec());
        }
        info!("Total migrated SchemaKeys: {}", keys.len());
        self.delete_migrated_keys(keys).await;

        Ok(())
    }

    async fn migrate_schema_key(&self, key: &v1SchemaKey) -> Result<()> {
        let new_key = SchemaNameKey::new(&key.catalog_name, &key.schema_name);
        let schema_name_value = SchemaNameValue::default();

        info!("Creating '{new_key}'");

        if self.dryrun {
            info!("Dryrun: do nothing");
        } else {
            self.etcd_store
                .put(
                    PutRequest::new()
                        .with_key(new_key.to_bytes())
                        .with_value(schema_name_value.try_as_raw_value().unwrap()),
                )
                .await
                .unwrap();
        }

        Ok(())
    }

    async fn migrate_catalog_keys(&self) -> Result<()> {
        // The catalog key prefix.
        let key = b"__c".to_vec();
        let range_end = get_prefix_end_key(&key);

        let mut keys = Vec::new();
        info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
        let mut stream = PaginationStream::new(
            self.etcd_store.clone(),
            RangeRequest::new().with_range(key, range_end),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
                let key_str =
                    std::str::from_utf8(&kv.key).context(MetaError::ConvertRawKeySnafu)?;
                let key = v1CatalogKey::parse(key_str)
                    .unwrap_or_else(|e| panic!("catalog key is corrupted: {e}, key: {key_str}"));

                Ok(key)
            }),
        );
        while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
            let _ = self.migrate_catalog_key(&key).await;
            keys.push(key.to_string().as_bytes().to_vec());
        }
        info!("Total migrated CatalogKeys: {}", keys.len());
        self.delete_migrated_keys(keys).await;

        Ok(())
    }

    async fn migrate_catalog_key(&self, key: &v1CatalogKey) {
        let new_key = CatalogNameKey::new(&key.catalog_name);
        let catalog_name_value = CatalogNameValue;

        info!("Creating '{new_key}'");

        if self.dryrun {
            info!("Dryrun: do nothing");
        } else {
            self.etcd_store
                .put(
                    PutRequest::new()
                        .with_key(new_key.to_bytes())
                        .with_value(catalog_name_value.try_as_raw_value().unwrap()),
                )
                .await
                .unwrap();
        }
    }

    async fn migrate_table_global_values(&self) -> Result<()> {
        let key = b"__tg".to_vec();
        let range_end = get_prefix_end_key(&key);

        let mut keys = Vec::new();

        info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
        let mut stream = PaginationStream::new(
            self.etcd_store.clone(),
            RangeRequest::new().with_range(key, range_end.clone()),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
                let key = String::from_utf8_lossy(kv.key()).to_string();
                let value = TableGlobalValue::from_bytes(kv.value())
                    .unwrap_or_else(|e| panic!("table global value is corrupted: {e}, key: {key}"));

                Ok((key, value))
            }),
        );
        while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? {
            self.create_table_name_key(&value).await;

            self.create_datanode_table_keys(&value).await;

            self.split_table_global_value(&key, value).await;

            keys.push(key.as_bytes().to_vec());
        }

        info!("Total migrated TableGlobalKeys: {}", keys.len());
        self.delete_migrated_keys(keys).await;

        Ok(())
    }

    async fn delete_migrated_keys(&self, keys: Vec<Vec<u8>>) {
        for keys in keys.chunks(PAGE_SIZE) {
            info!("Deleting {} keys", keys.len());
            let req = BatchDeleteRequest {
                keys: keys.to_vec(),
                prev_kv: false,
            };
            if self.dryrun {
                info!("Dryrun: do nothing");
            } else {
                self.etcd_store.batch_delete(req).await.unwrap();
            }
        }
    }

    async fn split_table_global_value(&self, key: &str, value: TableGlobalValue) {
        let table_id = value.table_id();
        let region_distribution: RegionDistribution = value.regions_id_map.into_iter().collect();

        let table_info_key = TableInfoKey::new(table_id);
        let table_info_value = TableInfoValue::new(value.table_info);

        let table_region_key = TableRegionKey::new(table_id);
        let table_region_value = TableRegionValue::new(region_distribution);

        info!("Splitting TableGlobalKey '{key}' into '{table_info_key}' and '{table_region_key}'");

        if self.dryrun {
            info!("Dryrun: do nothing");
        } else {
            self.etcd_store
                .batch_put(
                    BatchPutRequest::new()
                        .add_kv(
                            table_info_key.to_bytes(),
                            table_info_value.try_as_raw_value().unwrap(),
                        )
                        .add_kv(
                            table_region_key.to_bytes(),
                            table_region_value.try_as_raw_value().unwrap(),
                        ),
                )
                .await
                .unwrap();
        }
    }

    async fn create_table_name_key(&self, value: &TableGlobalValue) {
        let table_info = &value.table_info;
        let table_id = value.table_id();

        let table_name_key = TableNameKey::new(
            &table_info.catalog_name,
            &table_info.schema_name,
            &table_info.name,
        );
        let table_name_value = TableNameValue::new(table_id);

        info!("Creating '{table_name_key}' => {table_id}");

        if self.dryrun {
            info!("Dryrun: do nothing");
        } else {
            self.etcd_store
                .put(
                    PutRequest::new()
                        .with_key(table_name_key.to_bytes())
                        .with_value(table_name_value.try_as_raw_value().unwrap()),
                )
                .await
                .unwrap();
        }
    }

    async fn create_datanode_table_keys(&self, value: &TableGlobalValue) {
        let table_id = value.table_id();
        let engine = value.table_info.meta.engine.as_str();
        let region_storage_path = region_storage_path(
            &value.table_info.catalog_name,
            &value.table_info.schema_name,
        );
        let region_distribution: RegionDistribution =
            value.regions_id_map.clone().into_iter().collect();

        // TODO(niebayes): properly fetch or construct wal options.
        let region_wal_options = HashMap::default();

        let datanode_table_kvs = region_distribution
            .into_iter()
            .map(|(datanode_id, regions)| {
                let k = DatanodeTableKey::new(datanode_id, table_id);
                info!("Creating DatanodeTableKey '{k}' => {regions:?}");
                (
                    k,
                    DatanodeTableValue::new(
                        table_id,
                        regions,
                        RegionInfo {
                            engine: engine.to_string(),
                            region_storage_path: region_storage_path.clone(),
                            region_options: (&value.table_info.meta.options).into(),
                            region_wal_options: region_wal_options.clone(),
                        },
                    ),
                )
            })
            .collect::<Vec<_>>();

        if self.dryrun {
            info!("Dryrun: do nothing");
        } else {
            let mut req = BatchPutRequest::new();
            for (key, value) in datanode_table_kvs {
                req = req.add_kv(key.to_bytes(), value.try_as_raw_value().unwrap());
            }
            self.etcd_store.batch_put(req).await.unwrap();
        }
    }
}

#[deprecated(since = "0.4.0", note = "Used for migrate old version(v0.3) metadata")]
mod v1_helper {
    use std::collections::HashMap;
    use std::fmt::{Display, Formatter};

    use err::{DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu};
    use lazy_static::lazy_static;
    use regex::Regex;
    use serde::{Deserialize, Serialize};
    use snafu::{ensure, OptionExt, ResultExt};
    use table::metadata::{RawTableInfo, TableId};

    pub const CATALOG_KEY_PREFIX: &str = "__c";
    pub const SCHEMA_KEY_PREFIX: &str = "__s";

    /// The pattern of a valid catalog, schema or table name.
    const NAME_PATTERN: &str = "[a-zA-Z_:][a-zA-Z0-9_:]*";

    lazy_static! {
        static ref CATALOG_KEY_PATTERN: Regex =
            Regex::new(&format!("^{CATALOG_KEY_PREFIX}-({NAME_PATTERN})$")).unwrap();
    }

    lazy_static! {
        static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
            "^{SCHEMA_KEY_PREFIX}-({NAME_PATTERN})-({NAME_PATTERN})$"
        ))
        .unwrap();
    }

    /// Table global info contains necessary info for a datanode to create table regions, including
    /// table id, table meta(schema...), region id allocation across datanodes.
    #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
    pub struct TableGlobalValue {
        /// Id of datanode that created the global table info kv. only for debugging.
        pub node_id: u64,
        /// Allocation of region ids across all datanodes.
        pub regions_id_map: HashMap<u64, Vec<u32>>,
        pub table_info: RawTableInfo,
    }

    impl TableGlobalValue {
        pub fn table_id(&self) -> TableId {
            self.table_info.ident.table_id
        }
    }

    pub struct CatalogKey {
        pub catalog_name: String,
    }

    impl Display for CatalogKey {
        fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
            f.write_str(CATALOG_KEY_PREFIX)?;
            f.write_str("-")?;
            f.write_str(&self.catalog_name)
        }
    }

    impl CatalogKey {
        pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
            let key = s.as_ref();
            let captures = CATALOG_KEY_PATTERN
                .captures(key)
                .context(InvalidCatalogSnafu { key })?;
            ensure!(captures.len() == 2, InvalidCatalogSnafu { key });
            Ok(Self {
                catalog_name: captures[1].to_string(),
            })
        }
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct CatalogValue;

    pub struct SchemaKey {
        pub catalog_name: String,
        pub schema_name: String,
    }

    impl Display for SchemaKey {
        fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
            f.write_str(SCHEMA_KEY_PREFIX)?;
            f.write_str("-")?;
            f.write_str(&self.catalog_name)?;
            f.write_str("-")?;
            f.write_str(&self.schema_name)
        }
    }

    impl SchemaKey {
        pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
            let key = s.as_ref();
            let captures = SCHEMA_KEY_PATTERN
                .captures(key)
                .context(InvalidCatalogSnafu { key })?;
            ensure!(captures.len() == 3, InvalidCatalogSnafu { key });
            Ok(Self {
                catalog_name: captures[1].to_string(),
                schema_name: captures[2].to_string(),
            })
        }
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct SchemaValue;

    macro_rules! define_catalog_value {
        ( $($val_ty: ty), *) => {
            $(
                impl $val_ty {
                    pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
                        serde_json::from_str(s.as_ref())
                            .context(DeserializeCatalogEntryValueSnafu { raw: s.as_ref() })
                    }

                    pub fn from_bytes(bytes: impl AsRef<[u8]>) -> Result<Self, Error> {
                        Self::parse(&String::from_utf8_lossy(bytes.as_ref()))
                    }
                }
            )*
        }
    }

    define_catalog_value!(TableGlobalValue);

    mod err {
        use snafu::{Location, Snafu};

        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub))]
        pub enum Error {
            #[snafu(display("Invalid catalog info: {}", key))]
            InvalidCatalog {
                key: String,
                #[snafu(implicit)]
                location: Location,
            },

            #[snafu(display("Failed to deserialize catalog entry value: {}", raw))]
            DeserializeCatalogEntryValue {
                raw: String,
                #[snafu(implicit)]
                location: Location,
                source: serde_json::error::Error,
            },
        }
    }
}
@@ -375,11 +375,11 @@ impl ErrorExt for Error {

            Error::SerdeJson { .. } | Error::FileIo { .. } => StatusCode::Unexpected,

            Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,

            Error::Other { source, .. } => source.status_code(),

            Error::BuildRuntime { source, .. } => source.status_code(),

            Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
        }
    }

@@ -16,10 +16,7 @@ use std::sync::Arc;
use std::time::Duration;

use async_trait::async_trait;
use cache::{
    build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
    TABLE_ROUTE_CACHE_NAME,
};
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::DatanodeClients;
@@ -302,25 +299,12 @@ impl StartCommand {
            .build(),
        );

        let table_cache = layered_cache_registry
            .get()
            .context(error::CacheRequiredSnafu {
                name: TABLE_CACHE_NAME,
            })?;
        let table_route_cache =
            layered_cache_registry
                .get()
                .context(error::CacheRequiredSnafu {
                    name: TABLE_ROUTE_CACHE_NAME,
                })?;
        let catalog_manager = KvBackendCatalogManager::new(
            opts.mode,
            Some(meta_client.clone()),
            cached_meta_backend.clone(),
            table_cache,
            table_route_cache,
        )
        .await;
            layered_cache_registry.clone(),
        );

        let executor = HandlerGroupExecutor::new(vec![
            Arc::new(ParseMailboxMessageHandler),
@@ -16,10 +16,7 @@ use std::sync::Arc;
use std::{fs, path};

use async_trait::async_trait;
use cache::{
    build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
    TABLE_ROUTE_CACHE_NAME,
};
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser;
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
@@ -61,14 +58,14 @@ use servers::export_metrics::ExportMetricsOption;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::{OptionExt, ResultExt};
use snafu::ResultExt;
use tracing_appender::non_blocking::WorkerGuard;

use crate::error::{
    BuildCacheRegistrySnafu, CacheRequiredSnafu, CreateDirSnafu, IllegalConfigSnafu,
    InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, Result,
    ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
    StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
    BuildCacheRegistrySnafu, CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu,
    InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, Result, ShutdownDatanodeSnafu,
    ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
    StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
};
use crate::options::GlobalOptions;
use crate::{log_versions, App};
@@ -421,20 +418,12 @@ impl StartCommand {
            .build(),
        );

        let table_cache = layered_cache_registry.get().context(CacheRequiredSnafu {
            name: TABLE_CACHE_NAME,
        })?;
        let table_route_cache = layered_cache_registry.get().context(CacheRequiredSnafu {
            name: TABLE_ROUTE_CACHE_NAME,
        })?;
        let catalog_manager = KvBackendCatalogManager::new(
            dn_opts.mode,
            None,
            kv_backend.clone(),
            table_cache,
            table_route_cache,
        )
        .await;
            layered_cache_registry.clone(),
        );

        let table_metadata_manager =
            Self::create_table_metadata_manager(kv_backend.clone()).await?;
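
Across repl.rs, cluster.rs, and standalone.rs the per-cache lookups disappear because KvBackendCatalogManager::new now takes the whole layered cache registry and resolves the caches it needs internally. A component that still needs a specific cache can fetch it from the registry itself, roughly as the deleted lines did; the explicit TableCacheRef annotation below is an assumption, the original code relied on type inference:

    // Sketch, mirroring the removed lookup: pull a typed cache out of the
    // layered registry, failing with CacheRequired when it was never registered.
    let table_cache: TableCacheRef = layered_cache_registry
        .get()
        .context(CacheRequiredSnafu {
            name: TABLE_CACHE_NAME,
        })?;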
@@ -92,44 +92,34 @@ impl CompressionType {
macro_rules! impl_compression_type {
    ($(($enum_item:ident, $prefix:ident)),*) => {
        paste::item! {
            use bytes::{Buf, BufMut, BytesMut};

            impl CompressionType {
                pub async fn encode<B: Buf>(&self, mut content: B) -> io::Result<Vec<u8>> {
                pub async fn encode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
                    match self {
                        $(
                            CompressionType::$enum_item => {
                                let mut buffer = Vec::with_capacity(content.remaining());
                                let mut buffer = Vec::with_capacity(content.as_ref().len());
                                let mut encoder = write::[<$prefix Encoder>]::new(&mut buffer);
                                encoder.write_all_buf(&mut content).await?;
                                encoder.write_all(content.as_ref()).await?;
                                encoder.shutdown().await?;
                                Ok(buffer)
                            }
                        )*
                        CompressionType::Uncompressed => {
                            let mut bs = BytesMut::with_capacity(content.remaining());
                            bs.put(content);
                            Ok(bs.to_vec())
                        },
                        CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
                    }
                }

                pub async fn decode<B: Buf>(&self, mut content: B) -> io::Result<Vec<u8>> {
                pub async fn decode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
                    match self {
                        $(
                            CompressionType::$enum_item => {
                                let mut buffer = Vec::with_capacity(content.remaining() * 2);
                                let mut buffer = Vec::with_capacity(content.as_ref().len() * 2);
                                let mut encoder = write::[<$prefix Decoder>]::new(&mut buffer);
                                encoder.write_all_buf(&mut content).await?;
                                encoder.write_all(content.as_ref()).await?;
                                encoder.shutdown().await?;
                                Ok(buffer)
                            }
                        )*
                        CompressionType::Uncompressed => {
                            let mut bs = BytesMut::with_capacity(content.remaining());
                            bs.put(content);
                            Ok(bs.to_vec())
                        },
                        CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
                    }
                }

@@ -161,13 +151,13 @@ macro_rules! impl_compression_type {
            $(
                #[tokio::test]
                async fn [<test_ $enum_item:lower _compression>]() {
                    let string = "foo_bar".as_bytes();
                    let string = "foo_bar".as_bytes().to_vec();
                    let compress = CompressionType::$enum_item
                        .encode(string)
                        .encode(&string)
                        .await
                        .unwrap();
                    let decompress = CompressionType::$enum_item
                        .decode(compress.as_slice())
                        .decode(&compress)
                        .await
                        .unwrap();
                    assert_eq!(decompress, string);
@@ -175,13 +165,13 @@ macro_rules! impl_compression_type {

                #[tokio::test]
                async fn test_uncompression() {
                    let string = "foo_bar".as_bytes();
                    let string = "foo_bar".as_bytes().to_vec();
                    let compress = CompressionType::Uncompressed
                        .encode(string)
                        .encode(&string)
                        .await
                        .unwrap();
                    let decompress = CompressionType::Uncompressed
                        .decode(compress.as_slice())
                        .decode(&compress)
                        .await
                        .unwrap();
                    assert_eq!(decompress, string);
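
The new signatures accept anything that is AsRef<[u8]>, so callers pass slices or vectors directly instead of Buf implementors. A small round-trip sketch using the Uncompressed variant shown in the tests above (the macro generates the same encode/decode pair for every compressed variant):

    // Sketch: round-trip through the AsRef<[u8]>-based API.
    let raw = b"foo_bar".to_vec();
    let encoded = CompressionType::Uncompressed.encode(&raw).await?;
    let decoded = CompressionType::Uncompressed.decode(&encoded).await?;
    assert_eq!(decoded, raw);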
@@ -36,7 +36,6 @@ use datafusion::physical_plan::SendableRecordBatchStream;
use futures::StreamExt;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::compat::FuturesAsyncWriteCompatExt;

use self::csv::CsvFormat;
use self::json::JsonFormat;
@@ -147,8 +146,7 @@ pub fn open_with_decoder<T: ArrowDecoder, F: Fn() -> DataFusionResult<T>>(
        let reader = object_store
            .reader(&path)
            .await
            .map_err(|e| DataFusionError::External(Box::new(e)))?
            .into_bytes_stream(..);
            .map_err(|e| DataFusionError::External(Box::new(e)))?;

        let mut upstream = compression_type.convert_stream(reader).fuse();

@@ -205,7 +203,6 @@ pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
            .writer_with(&path)
            .concurrent(concurrency)
            .await
            .map(|v| v.into_futures_async_write().compat_write())
            .context(error::WriteObjectSnafu { path })
    });

@@ -29,7 +29,6 @@ use datafusion::physical_plan::SendableRecordBatchStream;
use derive_builder::Builder;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tokio_util::io::SyncIoBridge;

use super::stream_to_file;
@@ -165,16 +164,10 @@ impl FileOpener for CsvOpener {
#[async_trait]
impl FileFormat for CsvFormat {
    async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
        let meta = store
            .stat(path)
            .await
            .context(error::ReadObjectSnafu { path })?;
        let reader = store
            .reader(path)
            .await
            .context(error::ReadObjectSnafu { path })?
            .into_futures_async_read(0..meta.content_length())
            .compat();
            .context(error::ReadObjectSnafu { path })?;

        let decoded = self.compression_type.convert_async_read(reader);

@@ -31,7 +31,6 @@ use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::physical_plan::SendableRecordBatchStream;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tokio_util::io::SyncIoBridge;

use super::stream_to_file;
@@ -83,16 +82,10 @@ impl Default for JsonFormat {
#[async_trait]
impl FileFormat for JsonFormat {
    async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
        let meta = store
            .stat(path)
            .await
            .context(error::ReadObjectSnafu { path })?;
        let reader = store
            .reader(path)
            .await
            .context(error::ReadObjectSnafu { path })?
            .into_futures_async_read(0..meta.content_length())
            .compat();
            .context(error::ReadObjectSnafu { path })?;

        let decoded = self.compression_type.convert_async_read(reader);

@@ -16,17 +16,15 @@ use std::sync::Arc;
 
 use arrow_schema::{ArrowError, Schema, SchemaRef};
 use async_trait::async_trait;
-use bytes::Bytes;
 use common_recordbatch::adapter::RecordBatchStreamTypeAdapter;
 use datafusion::datasource::physical_plan::{FileMeta, FileOpenFuture, FileOpener};
 use datafusion::error::{DataFusionError, Result as DfResult};
-use futures::future::BoxFuture;
-use futures::{FutureExt, StreamExt, TryStreamExt};
+use futures::{StreamExt, TryStreamExt};
 use object_store::ObjectStore;
 use orc_rust::arrow_reader::ArrowReaderBuilder;
 use orc_rust::async_arrow_reader::ArrowStreamReader;
-use orc_rust::reader::AsyncChunkReader;
 use snafu::ResultExt;
+use tokio::io::{AsyncRead, AsyncSeek};
 
 use crate::error::{self, Result};
 use crate::file_format::FileFormat;
@@ -34,49 +32,18 @@ use crate::file_format::FileFormat;
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
 pub struct OrcFormat;
 
-#[derive(Clone)]
-pub struct ReaderAdapter {
-    reader: object_store::Reader,
-    len: u64,
-}
-
-impl ReaderAdapter {
-    pub fn new(reader: object_store::Reader, len: u64) -> Self {
-        Self { reader, len }
-    }
-}
-
-impl AsyncChunkReader for ReaderAdapter {
-    fn len(&mut self) -> BoxFuture<'_, std::io::Result<u64>> {
-        async move { Ok(self.len) }.boxed()
-    }
-
-    fn get_bytes(
-        &mut self,
-        offset_from_start: u64,
-        length: u64,
-    ) -> BoxFuture<'_, std::io::Result<Bytes>> {
-        async move {
-            let bytes = self
-                .reader
-                .read(offset_from_start..offset_from_start + length)
-                .await?;
-            Ok(bytes.to_bytes())
-        }
-        .boxed()
-    }
-}
-
-pub async fn new_orc_stream_reader(
-    reader: ReaderAdapter,
-) -> Result<ArrowStreamReader<ReaderAdapter>> {
+pub async fn new_orc_stream_reader<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
+    reader: R,
+) -> Result<ArrowStreamReader<R>> {
     let reader_build = ArrowReaderBuilder::try_new_async(reader)
         .await
         .context(error::OrcReaderSnafu)?;
     Ok(reader_build.build_async())
 }
 
-pub async fn infer_orc_schema(reader: ReaderAdapter) -> Result<Schema> {
+pub async fn infer_orc_schema<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
+    reader: R,
+) -> Result<Schema> {
     let reader = new_orc_stream_reader(reader).await?;
     Ok(reader.schema().as_ref().clone())
 }
@@ -84,15 +51,13 @@ pub async fn infer_orc_schema(reader: ReaderAdapter) -> Result<Schema> {
 #[async_trait]
 impl FileFormat for OrcFormat {
     async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
-        let meta = store
-            .stat(path)
-            .await
-            .context(error::ReadObjectSnafu { path })?;
         let reader = store
             .reader(path)
             .await
             .context(error::ReadObjectSnafu { path })?;
-        let schema = infer_orc_schema(ReaderAdapter::new(reader, meta.content_length())).await?;
+
+        let schema = infer_orc_schema(reader).await?;
 
         Ok(schema)
     }
 }
@@ -132,22 +97,14 @@ impl FileOpener for OrcOpener {
         };
         let projection = self.projection.clone();
         Ok(Box::pin(async move {
-            let path = meta.location().to_string();
-
-            let meta = object_store
-                .stat(&path)
-                .await
-                .map_err(|e| DataFusionError::External(Box::new(e)))?;
-
             let reader = object_store
-                .reader(&path)
+                .reader(meta.location().to_string().as_str())
                 .await
                 .map_err(|e| DataFusionError::External(Box::new(e)))?;
 
-            let stream_reader =
-                new_orc_stream_reader(ReaderAdapter::new(reader, meta.content_length()))
-                    .await
-                    .map_err(|e| DataFusionError::External(Box::new(e)))?;
+            let stream_reader = new_orc_stream_reader(reader)
+                .await
+                .map_err(|e| DataFusionError::External(Box::new(e)))?;
 
             let stream =
                 RecordBatchStreamTypeAdapter::new(projected_schema, stream_reader, projection);
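Note (reviewer sketch, not part of this diff): with the `ReaderAdapter` gone, `new_orc_stream_reader` and `infer_orc_schema` accept any `AsyncRead + AsyncSeek + Unpin + Send` source, so the ORC path no longer needs a `stat()` round-trip to learn the object length before reading. A minimal usage sketch, assuming a local ORC file opened with `tokio::fs::File` (which satisfies those bounds); the path and helper name are illustrative:

    use tokio::fs::File;

    async fn print_orc_schema() -> Result<Schema> {
        // Any AsyncRead + AsyncSeek reader works; no content length is required up front.
        let file = File::open("/tmp/example.orc").await.unwrap(); // hypothetical path
        let schema = infer_orc_schema(file).await?;
        println!("{schema:?}");
        Ok(schema)
    }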
@@ -29,11 +29,10 @@ use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
 use datafusion::physical_plan::SendableRecordBatchStream;
 use futures::future::BoxFuture;
 use futures::StreamExt;
-use object_store::{FuturesAsyncReader, ObjectStore};
+use object_store::{ObjectStore, Reader, Writer};
 use parquet::basic::{Compression, ZstdLevel};
 use parquet::file::properties::WriterProperties;
 use snafu::ResultExt;
-use tokio_util::compat::{Compat, FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt};
 
 use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder, LazyBufferedWriter};
 use crate::error::{self, Result};
@@ -46,16 +45,10 @@ pub struct ParquetFormat {}
 #[async_trait]
 impl FileFormat for ParquetFormat {
     async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
-        let meta = store
-            .stat(path)
-            .await
-            .context(error::ReadObjectSnafu { path })?;
         let mut reader = store
             .reader(path)
             .await
-            .context(error::ReadObjectSnafu { path })?
-            .into_futures_async_read(0..meta.content_length())
-            .compat();
+            .context(error::ReadObjectSnafu { path })?;
 
         let metadata = reader
             .get_metadata()
@@ -105,7 +98,7 @@ impl ParquetFileReaderFactory for DefaultParquetFileReaderFactory {
 
 pub struct LazyParquetFileReader {
     object_store: ObjectStore,
-    reader: Option<Compat<FuturesAsyncReader>>,
+    reader: Option<Reader>,
     path: String,
 }
 
@@ -121,13 +114,7 @@ impl LazyParquetFileReader {
     /// Must initialize the reader, or throw an error from the future.
     async fn maybe_initialize(&mut self) -> result::Result<(), object_store::Error> {
         if self.reader.is_none() {
-            let meta = self.object_store.stat(&self.path).await?;
-            let reader = self
-                .object_store
-                .reader(&self.path)
-                .await?
-                .into_futures_async_read(0..meta.content_length())
-                .compat();
+            let reader = self.object_store.reader(&self.path).await?;
             self.reader = Some(reader);
         }
 
@@ -180,17 +167,16 @@ pub struct BufferedWriter {
 }
 
 type InnerBufferedWriter = LazyBufferedWriter<
-    Compat<object_store::FuturesAsyncWriter>,
+    object_store::Writer,
     ArrowWriter<SharedBuffer>,
-    impl Fn(String) -> BoxFuture<'static, Result<Compat<object_store::FuturesAsyncWriter>>>,
+    impl Fn(String) -> BoxFuture<'static, Result<Writer>>,
 >;
 
 impl BufferedWriter {
     fn make_write_factory(
         store: ObjectStore,
         concurrency: usize,
-    ) -> impl Fn(String) -> BoxFuture<'static, Result<Compat<object_store::FuturesAsyncWriter>>>
-    {
+    ) -> impl Fn(String) -> BoxFuture<'static, Result<Writer>> {
         move |path| {
             let store = store.clone();
             Box::pin(async move {
@@ -198,7 +184,6 @@ impl BufferedWriter {
                     .writer_with(&path)
                     .concurrent(concurrency)
                     .await
-                    .map(|v| v.into_futures_async_write().compat_write())
                     .context(error::WriteObjectSnafu { path })
             })
         }
@@ -120,7 +120,7 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
 
     let written = tmp_store.read(&output_path).await.unwrap();
     let origin = store.read(origin_path).await.unwrap();
-    assert_eq_lines(written.to_vec(), origin.to_vec());
+    assert_eq_lines(written, origin);
 }
 
 pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usize) -> usize) {
@@ -158,7 +158,7 @@ pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usiz
 
     let written = tmp_store.read(&output_path).await.unwrap();
     let origin = store.read(origin_path).await.unwrap();
-    assert_eq_lines(written.to_vec(), origin.to_vec());
+    assert_eq_lines(written, origin);
 }
 
 // Ignore the CRLF difference across operating systems.
@@ -143,8 +143,6 @@ fn clamp_impl<T: LogicalPrimitiveType, const CLAMP_MIN: bool, const CLAMP_MAX: b
     min: T::Native,
     max: T::Native,
 ) -> Result<VectorRef> {
-    common_telemetry::info!("[DEBUG] min {min:?}, max {max:?}");
-
     let iter = ArrayIter::new(input);
     let result = iter.map(|x| {
         x.map(|x| {
@@ -24,7 +24,7 @@ pub use registry::{
     LayeredCacheRegistryBuilder, LayeredCacheRegistryRef,
 };
 pub use table::{
-    new_table_info_cache, new_table_name_cache, new_table_route_cache, TableInfoCache,
-    TableInfoCacheRef, TableNameCache, TableNameCacheRef, TableRoute, TableRouteCache,
-    TableRouteCacheRef,
+    new_table_info_cache, new_table_name_cache, new_table_route_cache, new_view_info_cache,
+    TableInfoCache, TableInfoCacheRef, TableNameCache, TableNameCacheRef, TableRoute,
+    TableRouteCache, TableRouteCacheRef, ViewInfoCache, ViewInfoCacheRef,
 };
@@ -145,13 +145,13 @@ mod tests {
 
     use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
     use moka::future::CacheBuilder;
+    use table::table_name::TableName;
 
     use crate::cache::flow::table_flownode::new_table_flownode_set_cache;
     use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
     use crate::key::flow::flow_info::FlowInfoValue;
     use crate::key::flow::FlowMetadataManager;
    use crate::kv_backend::memory::MemoryKvBackend;
-    use crate::table_name::TableName;
 
     #[tokio::test]
     async fn test_cache_empty_set() {
3
src/common/meta/src/cache/table.rs
vendored
@@ -15,6 +15,9 @@
 mod table_info;
 mod table_name;
 mod table_route;
+mod view_info;
 
 pub use table_info::{new_table_info_cache, TableInfoCache, TableInfoCacheRef};
 pub use table_name::{new_table_name_cache, TableNameCache, TableNameCacheRef};
 pub use table_route::{new_table_route_cache, TableRoute, TableRouteCache, TableRouteCacheRef};
+pub use view_info::{new_view_info_cache, ViewInfoCache, ViewInfoCacheRef};
@@ -18,6 +18,7 @@ use futures::future::BoxFuture;
 use moka::future::Cache;
 use snafu::OptionExt;
 use table::metadata::TableId;
+use table::table_name::TableName;
 
 use crate::cache::{CacheContainer, Initializer};
 use crate::error;
@@ -25,7 +26,6 @@ use crate::error::Result;
 use crate::instruction::CacheIdent;
 use crate::key::table_name::{TableNameKey, TableNameManager, TableNameManagerRef};
 use crate::kv_backend::KvBackendRef;
-use crate::table_name::TableName;
 
 /// [TableNameCache] caches the [TableName] to [TableId] mapping.
 pub type TableNameCache = CacheContainer<TableName, TableId, CacheIdent>;
143
src/common/meta/src/cache/table/view_info.rs
vendored
Normal file
@@ -0,0 +1,143 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use futures::future::BoxFuture;
+use moka::future::Cache;
+use snafu::OptionExt;
+use store_api::storage::TableId;
+
+use crate::cache::{CacheContainer, Initializer};
+use crate::error;
+use crate::error::Result;
+use crate::instruction::CacheIdent;
+use crate::key::view_info::{ViewInfoManager, ViewInfoManagerRef, ViewInfoValue};
+use crate::kv_backend::KvBackendRef;
+
+/// [ViewInfoCache] caches the [TableId] to [ViewInfoValue] mapping.
+pub type ViewInfoCache = CacheContainer<TableId, Arc<ViewInfoValue>, CacheIdent>;
+
+pub type ViewInfoCacheRef = Arc<ViewInfoCache>;
+
+/// Constructs a [ViewInfoCache].
+pub fn new_view_info_cache(
+    name: String,
+    cache: Cache<TableId, Arc<ViewInfoValue>>,
+    kv_backend: KvBackendRef,
+) -> ViewInfoCache {
+    let view_info_manager = Arc::new(ViewInfoManager::new(kv_backend));
+    let init = init_factory(view_info_manager);
+
+    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
+}
+
+fn init_factory(view_info_manager: ViewInfoManagerRef) -> Initializer<TableId, Arc<ViewInfoValue>> {
+    Arc::new(move |view_id| {
+        let view_info_manager = view_info_manager.clone();
+        Box::pin(async move {
+            let view_info = view_info_manager
+                .get(*view_id)
+                .await?
+                .context(error::ValueNotExistSnafu {})?
+                .into_inner();
+
+            Ok(Some(Arc::new(view_info)))
+        })
+    })
+}
+
+fn invalidator<'a>(
+    cache: &'a Cache<TableId, Arc<ViewInfoValue>>,
+    ident: &'a CacheIdent,
+) -> BoxFuture<'a, Result<()>> {
+    Box::pin(async move {
+        if let CacheIdent::TableId(table_id) = ident {
+            cache.invalidate(table_id).await
+        }
+        Ok(())
+    })
+}
+
+fn filter(ident: &CacheIdent) -> bool {
+    matches!(ident, CacheIdent::TableId(_))
+}
+
+#[cfg(test)]
+mod tests {
+    use std::collections::HashSet;
+    use std::sync::Arc;
+
+    use moka::future::CacheBuilder;
+    use table::table_name::TableName;
+
+    use super::*;
+    use crate::ddl::tests::create_view::test_create_view_task;
+    use crate::key::TableMetadataManager;
+    use crate::kv_backend::memory::MemoryKvBackend;
+
+    #[tokio::test]
+    async fn test_view_info_cache() {
+        let mem_kv = Arc::new(MemoryKvBackend::default());
+        let table_metadata_manager = TableMetadataManager::new(mem_kv.clone());
+        let cache = CacheBuilder::new(128).build();
+        let cache = new_view_info_cache("test".to_string(), cache, mem_kv.clone());
+
+        let result = cache.get(1024).await.unwrap();
+        assert!(result.is_none());
+        let mut task = test_create_view_task("my_view");
+        let table_names = {
+            let mut set = HashSet::new();
+            set.insert(TableName {
+                catalog_name: "greptime".to_string(),
+                schema_name: "public".to_string(),
+                table_name: "a_table".to_string(),
+            });
+            set.insert(TableName {
+                catalog_name: "greptime".to_string(),
+                schema_name: "public".to_string(),
+                table_name: "b_table".to_string(),
+            });
+            set
+        };
+
+        task.view_info.ident.table_id = 1024;
+        table_metadata_manager
+            .create_view_metadata(
+                task.view_info.clone(),
+                task.create_view.logical_plan.clone(),
+                table_names,
+            )
+            .await
+            .unwrap();
+
+        let view_info = cache.get(1024).await.unwrap().unwrap();
+        assert_eq!(view_info.view_info, task.create_view.logical_plan);
+        assert_eq!(
+            view_info.table_names,
+            task.create_view
+                .table_names
+                .iter()
+                .map(|t| t.clone().into())
+                .collect::<HashSet<_>>()
+        );
+
+        assert!(cache.contains_key(&1024));
+        cache
+            .invalidate(&[CacheIdent::TableId(1024)])
+            .await
+            .unwrap();
+        assert!(!cache.contains_key(&1024));
+    }
+}
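Note (reviewer sketch, not part of this diff): the new `view_info.rs` reuses the generic `CacheContainer` recipe the other table caches follow: `init_factory` loads a missing entry from the kv backend through `ViewInfoManager`, `invalidator` evicts an entry when a `CacheIdent::TableId` arrives, and `filter` keeps the container from reacting to unrelated idents. A minimal consumer sketch, assuming a `ViewInfoCacheRef` is already built as in the test above; the helper name is illustrative:

    async fn load_view_plan(cache: &ViewInfoCacheRef, view_id: TableId) -> Result<Option<Vec<u8>>> {
        // A miss runs the initializer (one kv-backend lookup); a hit returns the cached Arc.
        Ok(cache.get(view_id).await?.map(|info| info.view_info.clone()))
    }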
@@ -48,7 +48,7 @@ pub mod table_meta;
 #[cfg(any(test, feature = "testing"))]
 pub mod test_util;
 #[cfg(test)]
-mod tests;
+pub(crate) mod tests;
 pub mod truncate_table;
 pub mod utils;
 
@@ -13,10 +13,10 @@
 // limitations under the License.
 
 use table::metadata::RawTableInfo;
+use table::table_name::TableName;
 
 use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
 use crate::instruction::CacheIdent;
-use crate::table_name::TableName;
 
 impl AlterLogicalTablesProcedure {
     pub(crate) fn build_table_cache_keys_to_invalidate(&self) -> Vec<CacheIdent> {
@@ -18,13 +18,13 @@ use common_telemetry::{info, warn};
 use itertools::Itertools;
 use snafu::OptionExt;
 use table::metadata::TableId;
+use table::table_name::TableName;
 
 use crate::cache_invalidator::Context;
 use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
 use crate::ddl::physical_table_metadata;
 use crate::error::{Result, TableInfoNotFoundSnafu};
 use crate::instruction::CacheIdent;
-use crate::table_name::TableName;
 
 impl CreateLogicalTablesProcedure {
     pub(crate) async fn update_physical_table_metadata(&mut self) -> Result<()> {
@@ -22,9 +22,11 @@ use strum::AsRefStr;
 use table::metadata::{RawTableInfo, TableId, TableType};
 use table::table_reference::TableReference;
 
+use crate::cache_invalidator::Context;
 use crate::ddl::utils::handle_retry_error;
 use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
 use crate::error::{self, Result};
+use crate::instruction::CacheIdent;
 use crate::key::table_name::TableNameKey;
 use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
 use crate::rpc::ddl::CreateViewTask;
@@ -157,6 +159,25 @@ impl CreateViewProcedure {
         Ok(Status::executing(true))
     }
 
+    async fn invalidate_view_cache(&self) -> Result<()> {
+        let cache_invalidator = &self.context.cache_invalidator;
+        let ctx = Context {
+            subject: Some("Invalidate view cache by creating view".to_string()),
+        };
+
+        cache_invalidator
+            .invalidate(
+                &ctx,
+                &[
+                    CacheIdent::TableName(self.data.table_ref().into()),
+                    CacheIdent::TableId(self.view_id()),
+                ],
+            )
+            .await?;
+
+        Ok(())
+    }
+
     /// Creates view metadata
     ///
     /// Abort(not-retry):
@@ -175,15 +196,21 @@ impl CreateViewProcedure {
                     view_name: self.data.table_ref().to_string(),
                 })?;
             let new_logical_plan = self.data.task.raw_logical_plan().clone();
+            let table_names = self.data.task.table_names();
+
             manager
-                .update_view_info(view_id, &current_view_info, new_logical_plan)
+                .update_view_info(view_id, &current_view_info, new_logical_plan, table_names)
                 .await?;
 
             info!("Updated view metadata for view {view_id}");
         } else {
             let raw_view_info = self.view_info().clone();
             manager
-                .create_view_metadata(raw_view_info, self.data.task.raw_logical_plan())
+                .create_view_metadata(
+                    raw_view_info,
+                    self.data.task.raw_logical_plan().clone(),
+                    self.data.task.table_names(),
+                )
                 .await?;
 
             info!(
@@ -191,6 +218,7 @@ impl CreateViewProcedure {
                 ctx.procedure_id
             );
         }
+        self.invalidate_view_cache().await?;
 
         Ok(Status::done_with_output(view_id))
     }
@@ -14,19 +14,23 @@
 
 use std::any::Any;
 
+use common_catalog::format_full_table_name;
 use common_procedure::Status;
 use futures::TryStreamExt;
 use serde::{Deserialize, Serialize};
-use table::metadata::TableId;
+use snafu::OptionExt;
+use table::metadata::{TableId, TableType};
+use table::table_name::TableName;
 
 use super::executor::DropDatabaseExecutor;
 use super::metadata::DropDatabaseRemoveMetadata;
 use super::DropTableTarget;
+use crate::cache_invalidator::Context;
 use crate::ddl::drop_database::{DropDatabaseContext, State};
 use crate::ddl::DdlContext;
-use crate::error::Result;
+use crate::error::{Result, TableInfoNotFoundSnafu};
+use crate::instruction::CacheIdent;
 use crate::key::table_route::TableRouteValue;
-use crate::table_name::TableName;
 
 #[derive(Debug, Serialize, Deserialize)]
 pub(crate) struct DropDatabaseCursor {
@@ -101,6 +105,40 @@ impl DropDatabaseCursor {
             )),
         }
     }
+
+    async fn handle_view(
+        &self,
+        ddl_ctx: &DdlContext,
+        ctx: &mut DropDatabaseContext,
+        table_name: String,
+        table_id: TableId,
+    ) -> Result<(Box<dyn State>, Status)> {
+        let view_name = TableName::new(&ctx.catalog, &ctx.schema, &table_name);
+        ddl_ctx
+            .table_metadata_manager
+            .destroy_view_info(table_id, &view_name)
+            .await?;
+
+        let cache_invalidator = &ddl_ctx.cache_invalidator;
+        let ctx = Context {
+            subject: Some("Invalidate table cache by dropping table".to_string()),
+        };
+
+        cache_invalidator
+            .invalidate(
+                &ctx,
+                &[
+                    CacheIdent::TableName(view_name),
+                    CacheIdent::TableId(table_id),
+                ],
+            )
+            .await?;
+
+        Ok((
+            Box::new(DropDatabaseCursor::new(self.target)),
+            Status::executing(false),
+        ))
+    }
 }
 
 #[async_trait::async_trait]
@@ -122,6 +160,20 @@ impl State for DropDatabaseCursor {
         match ctx.tables.as_mut().unwrap().try_next().await? {
             Some((table_name, table_name_value)) => {
                 let table_id = table_name_value.table_id();
+
+                let table_info_value = ddl_ctx
+                    .table_metadata_manager
+                    .table_info_manager()
+                    .get(table_id)
+                    .await?
+                    .with_context(|| TableInfoNotFoundSnafu {
+                        table: format_full_table_name(&ctx.catalog, &ctx.schema, &table_name),
+                    })?;
+
+                if table_info_value.table_info.table_type == TableType::View {
+                    return self.handle_view(ddl_ctx, ctx, table_name, table_id).await;
+                }
+
                 match ddl_ctx
                     .table_metadata_manager
                     .table_route_manager()
@@ -19,6 +19,7 @@ use common_telemetry::info;
 use serde::{Deserialize, Serialize};
 use snafu::OptionExt;
 use table::metadata::TableId;
+use table::table_name::TableName;
 
 use super::cursor::DropDatabaseCursor;
 use super::{DropDatabaseContext, DropTableTarget};
@@ -29,7 +30,6 @@ use crate::error::{self, Result};
 use crate::key::table_route::TableRouteValue;
 use crate::region_keeper::OperatingRegionGuard;
 use crate::rpc::router::{operating_leader_regions, RegionRoute};
-use crate::table_name::TableName;
 
 #[derive(Debug, Serialize, Deserialize)]
 pub(crate) struct DropDatabaseExecutor {
@@ -135,6 +135,7 @@ mod tests {
     use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
     use common_error::ext::BoxedError;
    use common_recordbatch::SendableRecordBatchStream;
+    use table::table_name::TableName;
 
     use crate::ddl::drop_database::cursor::DropDatabaseCursor;
     use crate::ddl::drop_database::executor::DropDatabaseExecutor;
@@ -144,7 +145,6 @@ mod tests {
     use crate::key::datanode_table::DatanodeTableKey;
     use crate::peer::Peer;
     use crate::rpc::router::region_distribution;
-    use crate::table_name::TableName;
     use crate::test_util::{new_ddl_context, MockDatanodeHandler, MockDatanodeManager};
 
     #[derive(Clone)]
@@ -23,6 +23,7 @@ use futures::future::join_all;
 use snafu::ensure;
 use store_api::storage::RegionId;
 use table::metadata::TableId;
+use table::table_name::TableName;
 
 use crate::cache_invalidator::Context;
 use crate::ddl::utils::add_peer_context_if_needed;
@@ -32,7 +33,6 @@ use crate::instruction::CacheIdent;
 use crate::key::table_name::TableNameKey;
 use crate::key::table_route::TableRouteValue;
 use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
-use crate::table_name::TableName;
 
 /// [Control] indicated to the caller whether to go to the next step.
 #[derive(Debug)]
@@ -224,6 +224,7 @@ mod tests {
     use api::v1::{ColumnDataType, SemanticType};
     use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
     use table::metadata::RawTableInfo;
+    use table::table_name::TableName;
 
     use super::*;
     use crate::ddl::test_util::columns::TestColumnDefBuilder;
@@ -231,7 +232,6 @@ mod tests {
         build_raw_table_info_from_expr, TestCreateTableExprBuilder,
     };
     use crate::key::table_route::TableRouteValue;
-    use crate::table_name::TableName;
     use crate::test_util::{new_ddl_context, MockDatanodeManager};
 
     fn test_create_raw_table_info(name: &str) -> RawTableInfo {
@@ -17,7 +17,7 @@ mod alter_table;
 mod create_flow;
 mod create_logical_tables;
 mod create_table;
-mod create_view;
+pub(crate) mod create_view;
 mod drop_database;
 mod drop_flow;
 mod drop_table;
@@ -19,6 +19,7 @@ use std::sync::Arc;
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_procedure_test::execute_procedure_until_done;
 use session::context::QueryContext;
+use table::table_name::TableName;
 
 use crate::ddl::create_flow::CreateFlowProcedure;
 use crate::ddl::test_util::create_table::test_create_table_task;
@@ -27,7 +28,6 @@ use crate::ddl::DdlContext;
 use crate::key::table_route::TableRouteValue;
 use crate::key::FlowId;
 use crate::rpc::ddl::CreateFlowTask;
-use crate::table_name::TableName;
 use crate::test_util::{new_ddl_context, MockFlownodeManager};
 use crate::{error, ClusterId};
 
@@ -13,9 +13,10 @@
 // limitations under the License.
 
 use std::assert_matches::assert_matches;
+use std::collections::HashSet;
 use std::sync::Arc;
 
-use api::v1::CreateViewExpr;
+use api::v1::{CreateViewExpr, TableName};
 use common_error::ext::ErrorExt;
 use common_error::status_code::StatusCode;
 use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId, Status};
@@ -31,7 +32,35 @@ use crate::error::Error;
 use crate::rpc::ddl::CreateViewTask;
 use crate::test_util::{new_ddl_context, MockDatanodeManager};
 
-fn test_create_view_task(name: &str) -> CreateViewTask {
+fn test_table_names() -> HashSet<table::table_name::TableName> {
+    let mut set = HashSet::new();
+    set.insert(table::table_name::TableName {
+        catalog_name: "greptime".to_string(),
+        schema_name: "public".to_string(),
+        table_name: "a_table".to_string(),
+    });
+    set.insert(table::table_name::TableName {
+        catalog_name: "greptime".to_string(),
+        schema_name: "public".to_string(),
+        table_name: "b_table".to_string(),
+    });
+    set
+}
+
+pub(crate) fn test_create_view_task(name: &str) -> CreateViewTask {
+    let table_names = vec![
+        TableName {
+            catalog_name: "greptime".to_string(),
+            schema_name: "public".to_string(),
+            table_name: "a_table".to_string(),
+        },
+        TableName {
+            catalog_name: "greptime".to_string(),
+            schema_name: "public".to_string(),
+            table_name: "b_table".to_string(),
+        },
+    ];
+
     let expr = CreateViewExpr {
         catalog_name: "greptime".to_string(),
         schema_name: "public".to_string(),
@@ -39,6 +68,7 @@ fn test_create_view_task(name: &str) -> CreateViewTask {
         or_replace: false,
         create_if_not_exists: false,
         logical_plan: vec![1, 2, 3],
+        table_names,
     };
 
     let view_info = RawTableInfo {
@@ -70,7 +100,11 @@ async fn test_on_prepare_view_exists_err() {
     // Puts a value to table name key.
     ddl_context
         .table_metadata_manager
-        .create_view_metadata(task.view_info.clone(), &task.create_view.logical_plan)
+        .create_view_metadata(
+            task.view_info.clone(),
+            task.create_view.logical_plan.clone(),
+            test_table_names(),
+        )
         .await
         .unwrap();
     let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
@@ -90,7 +124,11 @@ async fn test_on_prepare_with_create_if_view_exists() {
     // Puts a value to table name key.
     ddl_context
         .table_metadata_manager
-        .create_view_metadata(task.view_info.clone(), &task.create_view.logical_plan)
+        .create_view_metadata(
+            task.view_info.clone(),
+            task.create_view.logical_plan.clone(),
+            test_table_names(),
+        )
        .await
        .unwrap();
     let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
@@ -18,6 +18,7 @@ use std::sync::Arc;
 
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_procedure_test::execute_procedure_until_done;
+use table::table_name::TableName;
 
 use crate::ddl::drop_flow::DropFlowProcedure;
 use crate::ddl::test_util::create_table::test_create_table_task;
@@ -26,7 +27,6 @@ use crate::ddl::tests::create_flow::create_test_flow;
 use crate::error;
 use crate::key::table_route::TableRouteValue;
 use crate::rpc::ddl::DropFlowTask;
-use crate::table_name::TableName;
 use crate::test_util::{new_ddl_context, MockFlownodeManager};
 
 fn test_drop_flow_task(flow_name: &str, flow_id: u32, drop_if_exists: bool) -> DropFlowTask {
@@ -28,6 +28,7 @@ use snafu::{ensure, ResultExt};
 use store_api::storage::RegionId;
 use strum::AsRefStr;
 use table::metadata::{RawTableInfo, TableId};
+use table::table_name::TableName;
 use table::table_reference::TableReference;
 
 use super::utils::handle_retry_error;
@@ -40,7 +41,6 @@ use crate::key::DeserializedValueWithBytes;
 use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
 use crate::rpc::ddl::TruncateTableTask;
 use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
-use crate::table_name::TableName;
 use crate::{metrics, ClusterId};
 
 pub struct TruncateTableProcedure {
@@ -489,8 +489,7 @@ async fn handle_create_table_task(
 
     Ok(SubmitDdlTaskResponse {
         key: procedure_id.into(),
-        table_id: Some(table_id),
-        ..Default::default()
+        table_ids: vec![table_id],
     })
 }
 
@@ -534,7 +533,6 @@ async fn handle_create_logical_table_tasks(
     Ok(SubmitDdlTaskResponse {
         key: procedure_id.into(),
         table_ids,
-        ..Default::default()
     })
 }
 
@@ -690,8 +688,7 @@ async fn handle_create_view_task(
 
     Ok(SubmitDdlTaskResponse {
         key: procedure_id.into(),
-        table_id: Some(view_id),
-        ..Default::default()
+        table_ids: vec![view_id],
     })
 }
 
@@ -20,11 +20,11 @@ use serde::{Deserialize, Serialize};
 use store_api::storage::{RegionId, RegionNumber};
 use strum::Display;
 use table::metadata::TableId;
+use table::table_name::TableName;
 
 use crate::flow_name::FlowName;
 use crate::key::schema_name::SchemaName;
 use crate::key::FlowId;
-use crate::table_name::TableName;
 use crate::{ClusterId, DatanodeId, FlownodeId};
 
 #[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)]
@@ -89,9 +89,6 @@ pub mod flow;
 pub mod schema_name;
 pub mod table_info;
 pub mod table_name;
-// TODO(weny): removes it.
-#[allow(deprecated)]
-pub mod table_region;
 pub mod view_info;
 // TODO(weny): removes it.
 #[allow(deprecated)]
@@ -119,6 +116,7 @@ use serde::{Deserialize, Serialize};
 use snafu::{ensure, OptionExt, ResultExt};
 use store_api::storage::RegionNumber;
 use table::metadata::{RawTableInfo, TableId};
+use table::table_name::TableName;
 use table_info::{TableInfoKey, TableInfoManager, TableInfoValue};
 use table_name::{TableNameKey, TableNameManager, TableNameValue};
 use view_info::{ViewInfoKey, ViewInfoManager, ViewInfoValue};
@@ -138,14 +136,12 @@ use crate::kv_backend::txn::{Txn, TxnOp};
 use crate::kv_backend::KvBackendRef;
 use crate::rpc::router::{region_distribution, RegionRoute, RegionStatus};
 use crate::rpc::store::BatchDeleteRequest;
-use crate::table_name::TableName;
 use crate::DatanodeId;
 
 pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
 pub const MAINTENANCE_KEY: &str = "maintenance";
 
 const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
-const TABLE_REGION_KEY_PREFIX: &str = "__table_region";
 pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
 pub const VIEW_INFO_KEY_PREFIX: &str = "__view_info";
 pub const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
@@ -490,7 +486,8 @@ impl TableMetadataManager {
     pub async fn create_view_metadata(
         &self,
         view_info: RawTableInfo,
-        raw_logical_plan: &Vec<u8>,
+        raw_logical_plan: Vec<u8>,
+        table_names: HashSet<TableName>,
     ) -> Result<()> {
         let view_id = view_info.ident.table_id;
 
@@ -512,7 +509,7 @@ impl TableMetadataManager {
             .build_create_txn(view_id, &table_info_value)?;
 
         // Creates view info
-        let view_info_value = ViewInfoValue::new(raw_logical_plan);
+        let view_info_value = ViewInfoValue::new(raw_logical_plan, table_names);
         let (create_view_info_txn, on_create_view_info_failure) = self
             .view_info_manager()
             .build_create_txn(view_id, &view_info_value)?;
@@ -804,6 +801,33 @@ impl TableMetadataManager {
         Ok(())
     }
 
+    fn view_info_keys(&self, view_id: TableId, view_name: &TableName) -> Result<Vec<Vec<u8>>> {
+        let mut keys = Vec::with_capacity(3);
+        let view_name = TableNameKey::new(
+            &view_name.catalog_name,
+            &view_name.schema_name,
+            &view_name.table_name,
+        );
+        let table_info_key = TableInfoKey::new(view_id);
+        let view_info_key = ViewInfoKey::new(view_id);
+        keys.push(view_name.to_bytes());
+        keys.push(table_info_key.to_bytes());
+        keys.push(view_info_key.to_bytes());
+
+        Ok(keys)
+    }
+
+    /// Deletes metadata for view **permanently**.
+    /// The caller MUST ensure it has the exclusive access to `ViewNameKey`.
+    pub async fn destroy_view_info(&self, view_id: TableId, view_name: &TableName) -> Result<()> {
+        let keys = self.view_info_keys(view_id, view_name)?;
+        let _ = self
+            .kv_backend
+            .batch_delete(BatchDeleteRequest::new().with_keys(keys))
+            .await?;
+        Ok(())
+    }
+
     /// Renames the table name and returns an error if different metadata exists.
     /// The caller MUST ensure it has the exclusive access to old and new `TableNameKey`s,
     /// and the new `TableNameKey` MUST be empty.
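Note (reviewer sketch, not part of this diff): `destroy_view_info` removes exactly three keys in one batch delete: the `__table_name` key, the `__table_info` key, and the `__view_info` key. Unlike dropping a regular table there are no regions or datanode mappings to clean up, which is why the drop-database cursor can short-circuit views through this call. A minimal call-site sketch, assuming a `TableMetadataManager` handle; the catalog, schema, and view names are illustrative:

    async fn drop_view_metadata(manager: &TableMetadataManager, view_id: TableId) -> Result<()> {
        let view_name = TableName::new("greptime", "public", "my_view"); // hypothetical view
        // One BatchDeleteRequest covering the name, table-info and view-info keys.
        manager.destroy_view_info(view_id, &view_name).await
    }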
@@ -903,8 +927,9 @@ impl TableMetadataManager {
         view_id: TableId,
         current_view_info_value: &DeserializedValueWithBytes<ViewInfoValue>,
         new_view_info: Vec<u8>,
+        table_names: HashSet<TableName>,
     ) -> Result<()> {
-        let new_view_info_value = current_view_info_value.update(new_view_info);
+        let new_view_info_value = current_view_info_value.update(new_view_info, table_names);
 
         // Updates view info.
         let (update_view_info_txn, on_update_view_info_failure) = self
@@ -1174,7 +1199,7 @@ impl_optional_meta_value! {
 
 #[cfg(test)]
 mod tests {
-    use std::collections::{BTreeMap, HashMap};
+    use std::collections::{BTreeMap, HashMap, HashSet};
     use std::sync::Arc;
 
     use bytes::Bytes;
@@ -1183,6 +1208,7 @@ mod tests {
     use futures::TryStreamExt;
     use store_api::storage::RegionId;
     use table::metadata::{RawTableInfo, TableInfo};
+    use table::table_name::TableName;
 
     use super::datanode_table::DatanodeTableKey;
     use super::test_utils;
@@ -1197,7 +1223,6 @@ mod tests {
     use crate::kv_backend::memory::MemoryKvBackend;
     use crate::peer::Peer;
     use crate::rpc::router::{region_distribution, Region, RegionRoute, RegionStatus};
-    use crate::table_name::TableName;
 
     #[test]
     fn test_deserialized_value_with_bytes() {
@@ -1250,6 +1275,21 @@ mod tests {
         test_utils::new_test_table_info(10, region_numbers)
     }
 
+    fn new_test_table_names() -> HashSet<TableName> {
+        let mut set = HashSet::new();
+        set.insert(TableName {
+            catalog_name: "greptime".to_string(),
+            schema_name: "public".to_string(),
+            table_name: "a_table".to_string(),
+        });
+        set.insert(TableName {
+            catalog_name: "greptime".to_string(),
+            schema_name: "public".to_string(),
+            table_name: "b_table".to_string(),
+        });
+        set
+    }
+
     async fn create_physical_table_metadata(
         table_metadata_manager: &TableMetadataManager,
         table_info: RawTableInfo,
@@ -1961,9 +2001,11 @@ mod tests {
 
         let logical_plan: Vec<u8> = vec![1, 2, 3];
 
+        let table_names = new_test_table_names();
+
         // Create metadata
         table_metadata_manager
-            .create_view_metadata(view_info.clone(), &logical_plan)
+            .create_view_metadata(view_info.clone(), logical_plan.clone(), table_names.clone())
             .await
             .unwrap();
 
@@ -1977,6 +2019,7 @@ mod tests {
             .unwrap()
             .into_inner();
         assert_eq!(current_view_info.view_info, logical_plan);
+        assert_eq!(current_view_info.table_names, table_names);
         // assert table info
         let current_table_info = table_metadata_manager
             .table_info_manager()
@@ -1989,16 +2032,43 @@ mod tests {
         }

         let new_logical_plan: Vec<u8> = vec![4, 5, 6];
-        let current_view_info_value =
-            DeserializedValueWithBytes::from_inner(ViewInfoValue::new(&logical_plan));
+        let new_table_names = {
+            let mut set = HashSet::new();
+            set.insert(TableName {
+                catalog_name: "greptime".to_string(),
+                schema_name: "public".to_string(),
+                table_name: "b_table".to_string(),
+            });
+            set.insert(TableName {
+                catalog_name: "greptime".to_string(),
+                schema_name: "public".to_string(),
+                table_name: "c_table".to_string(),
+            });
+            set
+        };
+
+        let current_view_info_value = DeserializedValueWithBytes::from_inner(ViewInfoValue::new(
+            logical_plan.clone(),
+            table_names,
+        ));
         // should be ok.
         table_metadata_manager
-            .update_view_info(view_id, &current_view_info_value, new_logical_plan.clone())
+            .update_view_info(
+                view_id,
+                &current_view_info_value,
+                new_logical_plan.clone(),
+                new_table_names.clone(),
+            )
            .await
            .unwrap();
         // if table info was updated, it should be ok.
         table_metadata_manager
-            .update_view_info(view_id, &current_view_info_value, new_logical_plan.clone())
+            .update_view_info(
+                view_id,
+                &current_view_info_value,
+                new_logical_plan.clone(),
+                new_table_names.clone(),
+            )
            .await
            .unwrap();

@@ -2011,14 +2081,21 @@ mod tests {
            .unwrap()
            .into_inner();
         assert_eq!(updated_view_info.view_info, new_logical_plan);
+        assert_eq!(updated_view_info.table_names, new_table_names);

         let wrong_view_info = logical_plan.clone();
-        let wrong_view_info_value =
-            DeserializedValueWithBytes::from_inner(current_view_info_value.update(wrong_view_info));
+        let wrong_view_info_value = DeserializedValueWithBytes::from_inner(
+            current_view_info_value.update(wrong_view_info, new_table_names.clone()),
+        );
         // if the current_view_info_value is wrong, it should return an error.
         // The ABA problem.
         assert!(table_metadata_manager
-            .update_view_info(view_id, &wrong_view_info_value, new_logical_plan.clone())
+            .update_view_info(
+                view_id,
+                &wrong_view_info_value,
+                new_logical_plan.clone(),
+                new_table_names.clone(),
+            )
            .await
            .is_err());

@@ -2031,5 +2108,6 @@ mod tests {
            .unwrap()
            .into_inner();
         assert_eq!(current_view_info.view_info, new_logical_plan);
+        assert_eq!(current_view_info.table_names, new_table_names);
     }
 }
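The repeated update_view_info(view_id, &current_view_info_value, ...) calls above behave like a compare-and-swap: the previously read value (its raw bytes and version) guards the write, so a stale snapshot is rejected, which is exactly what the "ABA problem" assertion checks. A minimal sketch of that guard, with a plain HashMap standing in for the kv backend and hypothetical helper names:

use std::collections::HashMap;

// Hypothetical stand-in for the kv backend's compare-and-put transaction.
fn compare_and_put(
    store: &mut HashMap<Vec<u8>, Vec<u8>>,
    key: &[u8],
    expected_raw: &[u8],
    new_raw: Vec<u8>,
) -> Result<(), String> {
    match store.get(key) {
        // Only write if the stored bytes are exactly the ones read earlier.
        Some(current) if current.as_slice() == expected_raw => {
            store.insert(key.to_vec(), new_raw);
            Ok(())
        }
        _ => Err("stale view info: it was changed concurrently".to_string()),
    }
}

fn main() {
    let mut store = HashMap::new();
    store.insert(b"view/1024".to_vec(), b"plan-v0".to_vec());

    // Succeeds: we still hold the current bytes.
    assert!(compare_and_put(&mut store, b"view/1024", b"plan-v0", b"plan-v1".to_vec()).is_ok());
    // Rejected: our snapshot is now stale, like the is_err() case in the test.
    assert!(compare_and_put(&mut store, b"view/1024", b"plan-v0", b"plan-v2".to_vec()).is_err());
}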
@@ -72,12 +72,8 @@ impl DatanodeTableKey {
         }
     }

-    fn prefix(datanode_id: DatanodeId) -> String {
-        format!("{}/{datanode_id}", DATANODE_TABLE_KEY_PREFIX)
-    }
-
-    pub fn range_start_key(datanode_id: DatanodeId) -> String {
-        format!("{}/", Self::prefix(datanode_id))
+    pub fn prefix(datanode_id: DatanodeId) -> String {
+        format!("{}/{datanode_id}/", DATANODE_TABLE_KEY_PREFIX)
     }
 }

@@ -114,7 +110,7 @@ impl<'a> MetaKey<'a, DatanodeTableKey> for DatanodeTableKey {

 impl Display for DatanodeTableKey {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}/{}", Self::prefix(self.datanode_id), self.table_id)
+        write!(f, "{}{}", Self::prefix(self.datanode_id), self.table_id)
     }
 }

@@ -164,7 +160,7 @@ impl DatanodeTableManager {
        &self,
        datanode_id: DatanodeId,
    ) -> BoxStream<'static, Result<DatanodeTableValue>> {
-        let start_key = DatanodeTableKey::range_start_key(datanode_id);
+        let start_key = DatanodeTableKey::prefix(datanode_id);
         let req = RangeRequest::new().with_prefix(start_key.as_bytes());

         let stream = PaginationStream::new(
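The trailing '/' added to DatanodeTableKey::prefix above is what makes the with_prefix range scan safe: without it, the prefix for datanode 1 also matches every key that belongs to datanode 11, 12, and so on. A standalone sketch of the collision (the literal "__dn_table" string is illustrative, not copied from the real constant):

fn main() {
    // Pre-change and post-change prefix layouts.
    let old_prefix = |id: u64| format!("__dn_table/{id}");
    let new_prefix = |id: u64| format!("__dn_table/{id}/");

    // A key that belongs to datanode 11, table 1024.
    let key_of_datanode_11 = "__dn_table/11/1024";

    // The old prefix for datanode 1 wrongly matches datanode 11's key...
    assert!(key_of_datanode_11.starts_with(&old_prefix(1)));
    // ...while the new prefix, ending in '/', does not.
    assert!(!key_of_datanode_11.starts_with(&new_prefix(1)));
    println!("prefix scan for datanode 1 no longer picks up datanode 11");
}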
@@ -262,12 +262,12 @@ mod tests {

     use futures::TryStreamExt;
     use table::metadata::TableId;
+    use table::table_name::TableName;

     use super::*;
     use crate::key::flow::table_flow::TableFlowKey;
     use crate::key::FlowPartitionId;
     use crate::kv_backend::memory::MemoryKvBackend;
-    use crate::table_name::TableName;
     use crate::FlownodeId;

     #[derive(Debug)]
@@ -20,6 +20,7 @@ use regex::Regex;
 use serde::{Deserialize, Serialize};
 use snafu::OptionExt;
 use table::metadata::TableId;
+use table::table_name::TableName;

 use crate::error::{self, Result};
 use crate::key::flow::FlowScoped;
@@ -27,7 +28,6 @@ use crate::key::txn_helper::TxnOpGetResponseSet;
 use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
 use crate::kv_backend::txn::Txn;
 use crate::kv_backend::KvBackendRef;
-use crate::table_name::TableName;
 use crate::FlownodeId;

 const FLOW_INFO_KEY_PREFIX: &str = "info";
@@ -69,8 +69,7 @@ impl FlownodeFlowKey {

     /// The prefix used to retrieve all [FlownodeFlowKey]s with the specified `flownode_id`.
     pub fn range_start_key(flownode_id: FlownodeId) -> Vec<u8> {
-        let inner =
-            BytesAdapter::from(FlownodeFlowKeyInner::range_start_key(flownode_id).into_bytes());
+        let inner = BytesAdapter::from(FlownodeFlowKeyInner::prefix(flownode_id).into_bytes());

         FlowScoped::new(inner).to_bytes()
     }
@@ -108,13 +107,8 @@ impl FlownodeFlowKeyInner {
         }
     }

-    fn prefix(flownode_id: FlownodeId) -> String {
-        format!("{}/{flownode_id}", FLOWNODE_FLOW_KEY_PREFIX)
-    }
-
-    /// The prefix used to retrieve all [FlownodeFlowKey]s with the specified `flownode_id`.
-    fn range_start_key(flownode_id: FlownodeId) -> String {
-        format!("{}/", Self::prefix(flownode_id))
+    pub fn prefix(flownode_id: FlownodeId) -> String {
+        format!("{}/{flownode_id}/", FLOWNODE_FLOW_KEY_PREFIX)
     }
 }

@@ -80,7 +80,7 @@ impl TableFlowKey {

     /// The prefix used to retrieve all [TableFlowKey]s with the specified `table_id`.
     pub fn range_start_key(table_id: TableId) -> Vec<u8> {
-        let inner = BytesAdapter::from(TableFlowKeyInner::range_start_key(table_id).into_bytes());
+        let inner = BytesAdapter::from(TableFlowKeyInner::prefix(table_id).into_bytes());

         FlowScoped::new(inner).to_bytes()
     }
@@ -123,12 +123,7 @@ impl TableFlowKeyInner {
     }

     fn prefix(table_id: TableId) -> String {
-        format!("{}/{table_id}", TABLE_FLOW_KEY_PREFIX)
-    }
-
-    /// The prefix used to retrieve all [TableFlowKey]s with the specified `table_id`.
-    fn range_start_key(table_id: TableId) -> String {
-        format!("{}/", Self::prefix(table_id))
+        format!("{}/{table_id}/", TABLE_FLOW_KEY_PREFIX)
     }
 }

@@ -19,6 +19,7 @@ use std::sync::Arc;
 use serde::{Deserialize, Serialize};
 use snafu::OptionExt;
 use table::metadata::{RawTableInfo, TableId};
+use table::table_name::TableName;
 use table::table_reference::TableReference;

 use super::TABLE_INFO_KEY_PATTERN;
@@ -28,7 +29,6 @@ use crate::key::{DeserializedValueWithBytes, MetaKey, TableMetaValue, TABLE_INFO
 use crate::kv_backend::txn::Txn;
 use crate::kv_backend::KvBackendRef;
 use crate::rpc::store::BatchGetRequest;
-use crate::table_name::TableName;

 /// The key stores the metadata of the table.
 ///
@@ -20,6 +20,7 @@ use futures_util::stream::BoxStream;
 use serde::{Deserialize, Serialize};
 use snafu::OptionExt;
 use table::metadata::TableId;
+use table::table_name::TableName;

 use super::{MetaKey, TableMetaValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
 use crate::error::{Error, InvalidTableMetadataSnafu, Result};
@@ -29,7 +30,6 @@ use crate::kv_backend::KvBackendRef;
 use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
 use crate::rpc::store::{BatchGetRequest, RangeRequest};
 use crate::rpc::KeyValue;
-use crate::table_name::TableName;

 #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
 pub struct TableNameKey<'a> {
@@ -48,7 +48,7 @@ impl<'a> TableNameKey<'a> {
     }

     pub fn prefix_to_table(catalog: &str, schema: &str) -> String {
-        format!("{}/{}/{}", TABLE_NAME_KEY_PREFIX, catalog, schema)
+        format!("{}/{}/{}/", TABLE_NAME_KEY_PREFIX, catalog, schema)
     }
 }

@@ -56,7 +56,7 @@ impl Display for TableNameKey<'_> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         write!(
             f,
-            "{}/{}",
+            "{}{}",
             Self::prefix_to_table(self.catalog, self.schema),
             self.table
         )
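prefix_to_table now ends with '/' for the same reason as the key prefixes above: a scan for one schema must not leak tables from a schema that merely shares a string prefix, and the test_prefix_scan_tables case added below exercises exactly this with the "👉" and "👉👈" schemas. A tiny illustration (the "__table_name" literal is an assumption here, not quoted from the constant):

fn main() {
    // Post-change key prefix for a catalog/schema pair.
    let prefix = |catalog: &str, schema: &str| format!("__table_name/{catalog}/{schema}/");

    let table_in_other_schema = "__table_name/greptime/👉👈/t";
    // With the trailing '/', scanning schema "👉" cannot match schema "👉👈".
    assert!(!table_in_other_schema.starts_with(&prefix("greptime", "👉")));
    println!("schema prefixes no longer collide");
}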
@@ -268,7 +268,11 @@ impl TableNameManager {
 #[cfg(test)]
 mod tests {

+    use futures::StreamExt;
+
     use super::*;
+    use crate::kv_backend::KvBackend;
+    use crate::rpc::store::PutRequest;

     #[test]
     fn test_strip_table_name() {
@@ -324,4 +328,39 @@ mod tests {
         assert_eq!(value.try_as_raw_value().unwrap(), literal);
         assert_eq!(TableNameValue::try_from_raw_value(literal).unwrap(), value);
     }
+
+    #[tokio::test]
+    async fn test_prefix_scan_tables() {
+        let memory_kv = Arc::new(MemoryKvBackend::<crate::error::Error>::new());
+        memory_kv
+            .put(PutRequest {
+                key: TableNameKey {
+                    catalog: "greptime",
+                    schema: "👉",
+                    table: "t",
+                }
+                .to_bytes(),
+                value: vec![],
+                prev_kv: false,
+            })
+            .await
+            .unwrap();
+        memory_kv
+            .put(PutRequest {
+                key: TableNameKey {
+                    catalog: "greptime",
+                    schema: "👉👈",
+                    table: "t",
+                }
+                .to_bytes(),
+                value: vec![],
+                prev_kv: false,
+            })
+            .await
+            .unwrap();
+
+        let manager = TableNameManager::new(memory_kv);
+        let items = manager.tables("greptime", "👉").collect::<Vec<_>>().await;
+        assert_eq!(items.len(), 1);
+    }
 }
|
|||||||
@@ -1,130 +0,0 @@
|
|||||||
// Copyright 2023 Greptime Team
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
use std::fmt::Display;
|
|
||||||
|
|
||||||
use lazy_static::lazy_static;
|
|
||||||
use regex::Regex;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use snafu::{OptionExt, ResultExt};
|
|
||||||
use store_api::storage::RegionNumber;
|
|
||||||
use table::metadata::TableId;
|
|
||||||
|
|
||||||
use super::{MetaKey, TABLE_REGION_KEY_PREFIX};
|
|
||||||
use crate::error::{InvalidTableMetadataSnafu, Result, SerdeJsonSnafu};
|
|
||||||
use crate::{impl_table_meta_value, DatanodeId};
|
|
||||||
|
|
||||||
pub type RegionDistribution = BTreeMap<DatanodeId, Vec<RegionNumber>>;
|
|
||||||
|
|
||||||
#[deprecated(
|
|
||||||
since = "0.4.0",
|
|
||||||
note = "Please use the TableRouteManager's get_region_distribution method instead"
|
|
||||||
)]
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub struct TableRegionKey {
|
|
||||||
table_id: TableId,
|
|
||||||
}
|
|
||||||
|
|
||||||
lazy_static! {
|
|
||||||
static ref TABLE_REGION_KEY_PATTERN: Regex =
|
|
||||||
Regex::new(&format!("^{TABLE_REGION_KEY_PREFIX}/([0-9]+)$")).unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TableRegionKey {
|
|
||||||
pub fn new(table_id: TableId) -> Self {
|
|
||||||
Self { table_id }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Display for TableRegionKey {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
write!(f, "{}/{}", TABLE_REGION_KEY_PREFIX, self.table_id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> MetaKey<'a, TableRegionKey> for TableRegionKey {
|
|
||||||
fn to_bytes(&self) -> Vec<u8> {
|
|
||||||
self.to_string().into_bytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn from_bytes(bytes: &'a [u8]) -> Result<TableRegionKey> {
|
|
||||||
let key = std::str::from_utf8(bytes).map_err(|e| {
|
|
||||||
InvalidTableMetadataSnafu {
|
|
||||||
err_msg: format!(
|
|
||||||
"TableRegionKey '{}' is not a valid UTF8 string: {e}",
|
|
||||||
String::from_utf8_lossy(bytes)
|
|
||||||
),
|
|
||||||
}
|
|
||||||
.build()
|
|
||||||
})?;
|
|
||||||
let captures =
|
|
||||||
TABLE_REGION_KEY_PATTERN
|
|
||||||
.captures(key)
|
|
||||||
.context(InvalidTableMetadataSnafu {
|
|
||||||
err_msg: format!("Invalid TableRegionKey '{key}'"),
|
|
||||||
})?;
|
|
||||||
// Safety: pass the regex check above
|
|
||||||
let table_id = captures[1].parse::<TableId>().unwrap();
|
|
||||||
Ok(TableRegionKey { table_id })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[deprecated(
|
|
||||||
since = "0.4.0",
|
|
||||||
note = "Please use the TableRouteManager's get_region_distribution method instead"
|
|
||||||
)]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub struct TableRegionValue {
|
|
||||||
pub region_distribution: RegionDistribution,
|
|
||||||
version: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TableRegionValue {
|
|
||||||
pub fn new(region_distribution: RegionDistribution) -> Self {
|
|
||||||
Self {
|
|
||||||
region_distribution,
|
|
||||||
version: 0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl_table_meta_value! {TableRegionValue}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
use crate::key::TableMetaValue;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_serialization() {
|
|
||||||
let key = TableRegionKey::new(24);
|
|
||||||
let raw_key = key.to_bytes();
|
|
||||||
assert_eq!(raw_key, b"__table_region/24");
|
|
||||||
let deserialized = TableRegionKey::from_bytes(b"__table_region/24").unwrap();
|
|
||||||
assert_eq!(key, deserialized);
|
|
||||||
|
|
||||||
let value = TableRegionValue {
|
|
||||||
region_distribution: RegionDistribution::from([(1, vec![1, 2, 3]), (2, vec![4, 5, 6])]),
|
|
||||||
version: 0,
|
|
||||||
};
|
|
||||||
let literal = br#"{"region_distribution":{"1":[1,2,3],"2":[4,5,6]},"version":0}"#;
|
|
||||||
|
|
||||||
assert_eq!(value.try_as_raw_value().unwrap(), literal);
|
|
||||||
assert_eq!(
|
|
||||||
TableRegionValue::try_from_raw_value(literal).unwrap(),
|
|
||||||
value,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -12,12 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 use std::fmt::Display;
+use std::sync::Arc;

 use serde::{Deserialize, Serialize};
 use snafu::OptionExt;
 use table::metadata::TableId;
+use table::table_name::TableName;

 use super::VIEW_INFO_KEY_PATTERN;
 use crate::error::{InvalidViewInfoSnafu, Result};
@@ -80,21 +82,30 @@ impl<'a> MetaKey<'a, ViewInfoKey> for ViewInfoKey {
 /// The VIEW info value that keeps the metadata.
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
 pub struct ViewInfoValue {
+    /// The encoded logical plan
     pub view_info: RawViewLogicalPlan,
+    /// The resolved fully table names in logical plan
+    pub table_names: HashSet<TableName>,
     version: u64,
 }

 impl ViewInfoValue {
-    pub fn new(view_info: &RawViewLogicalPlan) -> Self {
+    pub fn new(view_info: RawViewLogicalPlan, table_names: HashSet<TableName>) -> Self {
         Self {
-            view_info: view_info.clone(),
+            view_info,
+            table_names,
             version: 0,
         }
     }

-    pub(crate) fn update(&self, new_view_info: RawViewLogicalPlan) -> Self {
+    pub(crate) fn update(
+        &self,
+        new_view_info: RawViewLogicalPlan,
+        table_names: HashSet<TableName>,
+    ) -> Self {
         Self {
             view_info: new_view_info,
+            table_names,
             version: self.version + 1,
         }
     }
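A minimal sketch of how the extended ViewInfoValue is meant to be used: new records the encoded plan together with the resolved table names, and every update replaces both and bumps version, which is what lets update_view_info detect stale snapshots. Types are simplified stand-ins (plain strings instead of TableName), not the real definitions above:

use std::collections::HashSet;

#[derive(Debug, Clone, PartialEq)]
struct ViewInfoValue {
    view_info: Vec<u8>,           // encoded logical plan
    table_names: HashSet<String>, // simplified: fully-qualified names as strings
    version: u64,
}

impl ViewInfoValue {
    fn new(view_info: Vec<u8>, table_names: HashSet<String>) -> Self {
        Self { view_info, table_names, version: 0 }
    }

    // Each update re-records the resolved table names and bumps the version.
    fn update(&self, new_view_info: Vec<u8>, table_names: HashSet<String>) -> Self {
        Self { view_info: new_view_info, table_names, version: self.version + 1 }
    }
}

fn main() {
    let v0 = ViewInfoValue::new(vec![1, 2, 3], HashSet::from(["greptime.public.a_table".to_string()]));
    let v1 = v0.update(vec![4, 5, 6], HashSet::from(["greptime.public.b_table".to_string()]));
    assert_eq!(v1.version, v0.version + 1);
    println!("{:?} -> {:?}", v0.table_names, v1.table_names);
    println!("plan bytes: {:?} -> {:?}", v0.view_info, v1.view_info);
}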
@@ -105,6 +116,8 @@ pub struct ViewInfoManager {
     kv_backend: KvBackendRef,
 }

+pub type ViewInfoManagerRef = Arc<ViewInfoManager>;
+
 impl ViewInfoManager {
     pub fn new(kv_backend: KvBackendRef) -> Self {
         Self { kv_backend }
@@ -254,9 +267,25 @@ mod tests {

     #[test]
     fn test_value_serialization() {
+        let table_names = {
+            let mut set = HashSet::new();
+            set.insert(TableName {
+                catalog_name: "greptime".to_string(),
+                schema_name: "public".to_string(),
+                table_name: "a_table".to_string(),
+            });
+            set.insert(TableName {
+                catalog_name: "greptime".to_string(),
+                schema_name: "public".to_string(),
+                table_name: "b_table".to_string(),
+            });
+            set
+        };
+
         let value = ViewInfoValue {
             view_info: vec![1, 2, 3],
             version: 1,
+            table_names,
         };
         let serialized = value.try_as_raw_value().unwrap();
         let deserialized = ViewInfoValue::try_from_raw_value(&serialized).unwrap();
|
|||||||
@@ -40,7 +40,6 @@ pub mod region_keeper;
 pub mod rpc;
 pub mod sequence;
 pub mod state_store;
-pub mod table_name;
 #[cfg(any(test, feature = "testing"))]
 pub mod test_util;
 pub mod util;
|
|||||||
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 use std::result;

 use api::v1::meta::ddl_task_request::Task;
@@ -39,11 +39,11 @@ use serde_with::{serde_as, DefaultOnNull};
 use session::context::QueryContextRef;
 use snafu::{OptionExt, ResultExt};
 use table::metadata::{RawTableInfo, TableId};
+use table::table_name::TableName;
 use table::table_reference::TableReference;

 use crate::error::{self, Result};
 use crate::key::FlowId;
-use crate::table_name::TableName;

 /// DDL tasks
 #[derive(Debug, Clone)]
|
||||||
@@ -274,10 +274,7 @@ impl TryFrom<SubmitDdlTaskRequest> for PbDdlTaskRequest {
|
|||||||
#[derive(Debug, Default)]
|
#[derive(Debug, Default)]
|
||||||
pub struct SubmitDdlTaskResponse {
|
pub struct SubmitDdlTaskResponse {
|
||||||
pub key: Vec<u8>,
|
pub key: Vec<u8>,
|
||||||
// For create physical table
|
// `table_id`s for `CREATE TABLE` or `CREATE LOGICAL TABLES` task.
|
||||||
// TODO(jeremy): remove it?
|
|
||||||
pub table_id: Option<TableId>,
|
|
||||||
// For create multi logical tables
|
|
||||||
pub table_ids: Vec<TableId>,
|
pub table_ids: Vec<TableId>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -285,11 +282,9 @@ impl TryFrom<PbDdlTaskResponse> for SubmitDdlTaskResponse {
|
|||||||
type Error = error::Error;
|
type Error = error::Error;
|
||||||
|
|
||||||
fn try_from(resp: PbDdlTaskResponse) -> Result<Self> {
|
fn try_from(resp: PbDdlTaskResponse) -> Result<Self> {
|
||||||
let table_id = resp.table_id.map(|t| t.id);
|
|
||||||
let table_ids = resp.table_ids.into_iter().map(|t| t.id).collect();
|
let table_ids = resp.table_ids.into_iter().map(|t| t.id).collect();
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
key: resp.pid.map(|pid| pid.key).unwrap_or_default(),
|
key: resp.pid.map(|pid| pid.key).unwrap_or_default(),
|
||||||
table_id,
|
|
||||||
table_ids,
|
table_ids,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -299,9 +294,6 @@ impl From<SubmitDdlTaskResponse> for PbDdlTaskResponse {
|
|||||||
fn from(val: SubmitDdlTaskResponse) -> Self {
|
fn from(val: SubmitDdlTaskResponse) -> Self {
|
||||||
Self {
|
Self {
|
||||||
pid: Some(ProcedureId { key: val.key }),
|
pid: Some(ProcedureId { key: val.key }),
|
||||||
table_id: val
|
|
||||||
.table_id
|
|
||||||
.map(|table_id| api::v1::TableId { id: table_id }),
|
|
||||||
table_ids: val
|
table_ids: val
|
||||||
.table_ids
|
.table_ids
|
||||||
.into_iter()
|
.into_iter()
|
||||||
@@ -332,6 +324,14 @@ impl CreateViewTask {
|
|||||||
pub fn raw_logical_plan(&self) -> &Vec<u8> {
|
pub fn raw_logical_plan(&self) -> &Vec<u8> {
|
||||||
&self.create_view.logical_plan
|
&self.create_view.logical_plan
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn table_names(&self) -> HashSet<TableName> {
|
||||||
|
self.create_view
|
||||||
|
.table_names
|
||||||
|
.iter()
|
||||||
|
.map(|t| t.clone().into())
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TryFrom<PbCreateViewTask> for CreateViewTask {
|
impl TryFrom<PbCreateViewTask> for CreateViewTask {
|
||||||
|
|||||||
@@ -25,11 +25,11 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
|||||||
use snafu::OptionExt;
|
use snafu::OptionExt;
|
||||||
use store_api::storage::{RegionId, RegionNumber};
|
use store_api::storage::{RegionId, RegionNumber};
|
||||||
use strum::AsRefStr;
|
use strum::AsRefStr;
|
||||||
|
use table::table_name::TableName;
|
||||||
|
|
||||||
use crate::error::{self, Result};
|
use crate::error::{self, Result};
|
||||||
use crate::key::RegionDistribution;
|
use crate::key::RegionDistribution;
|
||||||
use crate::peer::Peer;
|
use crate::peer::Peer;
|
||||||
use crate::table_name::TableName;
|
|
||||||
use crate::DatanodeId;
|
use crate::DatanodeId;
|
||||||
|
|
||||||
pub fn region_distribution(region_routes: &[RegionRoute]) -> RegionDistribution {
|
pub fn region_distribution(region_routes: &[RegionRoute]) -> RegionDistribution {
|
||||||
|
|||||||
@@ -179,7 +179,7 @@ impl StateStore for ObjectStateStore {
|
|||||||
))
|
))
|
||||||
})
|
})
|
||||||
.context(ListStateSnafu { path: key })?;
|
.context(ListStateSnafu { path: key })?;
|
||||||
yield (key.into(), value.to_vec());
|
yield (key.into(), value);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -4,12 +4,16 @@ version.workspace = true
|
|||||||
edition.workspace = true
|
edition.workspace = true
|
||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
|
[features]
|
||||||
|
testing = []
|
||||||
|
|
||||||
[lints]
|
[lints]
|
||||||
workspace = true
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
api.workspace = true
|
api.workspace = true
|
||||||
async-trait.workspace = true
|
async-trait.workspace = true
|
||||||
|
bytes.workspace = true
|
||||||
common-error.workspace = true
|
common-error.workspace = true
|
||||||
common-macro.workspace = true
|
common-macro.workspace = true
|
||||||
common-recordbatch.workspace = true
|
common-recordbatch.workspace = true
|
||||||
|
|||||||
@@ -206,6 +206,13 @@ pub enum Error {
|
|||||||
location: Location,
|
location: Location,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
#[snafu(display("Failed to decode logical plan: {source}"))]
|
||||||
|
DecodePlan {
|
||||||
|
#[snafu(implicit)]
|
||||||
|
location: Location,
|
||||||
|
source: BoxedError,
|
||||||
|
},
|
||||||
|
|
||||||
#[snafu(display("Failed to do table mutation"))]
|
#[snafu(display("Failed to do table mutation"))]
|
||||||
TableMutation {
|
TableMutation {
|
||||||
source: BoxedError,
|
source: BoxedError,
|
||||||
@@ -282,11 +289,12 @@ impl ErrorExt for Error {
|
|||||||
| Error::InvalidFuncArgs { .. } => StatusCode::InvalidArguments,
|
| Error::InvalidFuncArgs { .. } => StatusCode::InvalidArguments,
|
||||||
|
|
||||||
Error::ConvertDfRecordBatchStream { source, .. } => source.status_code(),
|
Error::ConvertDfRecordBatchStream { source, .. } => source.status_code(),
|
||||||
Error::ExecutePhysicalPlan { source, .. } => source.status_code(),
|
|
||||||
Error::Execute { source, .. } => source.status_code(),
|
Error::DecodePlan { source, .. }
|
||||||
Error::ProcedureService { source, .. } | Error::TableMutation { source, .. } => {
|
| Error::Execute { source, .. }
|
||||||
source.status_code()
|
| Error::ExecutePhysicalPlan { source, .. }
|
||||||
}
|
| Error::ProcedureService { source, .. }
|
||||||
|
| Error::TableMutation { source, .. } => source.status_code(),
|
||||||
|
|
||||||
Error::PermissionDenied { .. } => StatusCode::PermissionDenied,
|
Error::PermissionDenied { .. } => StatusCode::PermissionDenied,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -18,7 +18,8 @@ mod function;
|
|||||||
pub mod logical_plan;
|
pub mod logical_plan;
|
||||||
pub mod prelude;
|
pub mod prelude;
|
||||||
mod signature;
|
mod signature;
|
||||||
|
#[cfg(any(test, feature = "testing"))]
|
||||||
|
pub mod test_util;
|
||||||
use std::fmt::{Debug, Display, Formatter};
|
use std::fmt::{Debug, Display, Formatter};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
|||||||
@@ -19,12 +19,15 @@ mod udf;

 use std::sync::Arc;

+use datafusion::catalog::CatalogProviderList;
+use datafusion::logical_expr::LogicalPlan;
 use datatypes::prelude::ConcreteDataType;
 pub use expr::build_filter_from_timestamp;

 pub use self::accumulator::{Accumulator, AggregateFunctionCreator, AggregateFunctionCreatorRef};
 pub use self::udaf::AggregateFunction;
 pub use self::udf::ScalarUdf;
+use crate::error::Result;
 use crate::function::{ReturnTypeFunction, ScalarFunctionImplementation};
 use crate::logical_plan::accumulator::*;
 use crate::signature::{Signature, Volatility};
@@ -68,6 +71,25 @@ pub fn create_aggregate_function(
     )
 }

+/// The datafusion `[LogicalPlan]` decoder.
+#[async_trait::async_trait]
+pub trait SubstraitPlanDecoder {
+    /// Decode the [`LogicalPlan`] from bytes with the [`CatalogProviderList`].
+    /// When `optimize` is true, it will do the optimization for decoded plan.
+    ///
+    /// TODO(dennis): It's not a good design for an API to do many things.
+    /// The `optimize` was introduced because of `query` and `catalog` cyclic dependency issue
+    /// I am happy to refactor it if we have a better solution.
+    async fn decode(
+        &self,
+        message: bytes::Bytes,
+        catalog_list: Arc<dyn CatalogProviderList>,
+        optimize: bool,
+    ) -> Result<LogicalPlan>;
+}
+
+pub type SubstraitPlanDecoderRef = Arc<dyn SubstraitPlanDecoder + Send + Sync>;
+
 #[cfg(test)]
 mod tests {
     use std::sync::Arc;
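For reference, a sketch of what an implementor of the new SubstraitPlanDecoder trait looks like; it mirrors the DummyDecoder that the new src/common/query/src/test_util.rs file below provides for tests, and assumes the workspace's datafusion, bytes and async-trait dependencies:

use std::sync::Arc;

use datafusion::catalog::CatalogProviderList;
use datafusion::logical_expr::LogicalPlan;

use crate::error::Result;
use crate::logical_plan::SubstraitPlanDecoder;

// A do-nothing decoder for tests; a real implementor would decode the
// substrait bytes against the catalog and optionally run the optimizer.
pub struct NoopDecoder;

#[async_trait::async_trait]
impl SubstraitPlanDecoder for NoopDecoder {
    async fn decode(
        &self,
        _message: bytes::Bytes,
        _catalog_list: Arc<dyn CatalogProviderList>,
        _optimize: bool,
    ) -> Result<LogicalPlan> {
        unimplemented!("decode is not exercised by these tests")
    }
}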
42
src/common/query/src/test_util.rs
Normal file
42
src/common/query/src/test_util.rs
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use datafusion::catalog::CatalogProviderList;
|
||||||
|
use datafusion::logical_expr::LogicalPlan;
|
||||||
|
|
||||||
|
use crate::error::Result;
|
||||||
|
use crate::logical_plan::SubstraitPlanDecoder;
|
||||||
|
|
||||||
|
/// Dummy `[SubstraitPlanDecoder]` for test.
|
||||||
|
pub struct DummyDecoder;
|
||||||
|
|
||||||
|
impl DummyDecoder {
|
||||||
|
pub fn arc() -> Arc<Self> {
|
||||||
|
Arc::new(DummyDecoder)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait::async_trait]
|
||||||
|
impl SubstraitPlanDecoder for DummyDecoder {
|
||||||
|
async fn decode(
|
||||||
|
&self,
|
||||||
|
_message: bytes::Bytes,
|
||||||
|
_catalog_list: Arc<dyn CatalogProviderList>,
|
||||||
|
_optimize: bool,
|
||||||
|
) -> Result<LogicalPlan> {
|
||||||
|
unreachable!()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -10,19 +10,15 @@ workspace = true
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
async-trait.workspace = true
|
async-trait.workspace = true
|
||||||
bytes.workspace = true
|
bytes.workspace = true
|
||||||
catalog.workspace = true
|
|
||||||
common-error.workspace = true
|
common-error.workspace = true
|
||||||
common-function.workspace = true
|
|
||||||
common-macro.workspace = true
|
common-macro.workspace = true
|
||||||
common-telemetry.workspace = true
|
common-telemetry.workspace = true
|
||||||
datafusion.workspace = true
|
datafusion.workspace = true
|
||||||
datafusion-common.workspace = true
|
datafusion-common.workspace = true
|
||||||
datafusion-expr.workspace = true
|
datafusion-expr.workspace = true
|
||||||
datafusion-substrait.workspace = true
|
datafusion-substrait.workspace = true
|
||||||
datatypes.workspace = true
|
|
||||||
promql.workspace = true
|
promql.workspace = true
|
||||||
prost.workspace = true
|
prost.workspace = true
|
||||||
session.workspace = true
|
|
||||||
snafu.workspace = true
|
snafu.workspace = true
|
||||||
|
|
||||||
[dependencies.substrait_proto]
|
[dependencies.substrait_proto]
|
||||||
|
|||||||
@@ -16,26 +16,19 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use bytes::{Buf, Bytes, BytesMut};
|
use bytes::{Buf, Bytes, BytesMut};
|
||||||
use common_function::function_registry::FUNCTION_REGISTRY;
|
|
||||||
use common_function::scalars::udf::create_udf;
|
|
||||||
use datafusion::catalog::CatalogProviderList;
|
use datafusion::catalog::CatalogProviderList;
|
||||||
use datafusion::execution::context::SessionState;
|
use datafusion::execution::context::SessionState;
|
||||||
use datafusion::execution::runtime_env::RuntimeEnv;
|
use datafusion::execution::runtime_env::RuntimeEnv;
|
||||||
use datafusion::execution::FunctionRegistry;
|
|
||||||
use datafusion::prelude::{SessionConfig, SessionContext};
|
use datafusion::prelude::{SessionConfig, SessionContext};
|
||||||
use datafusion_expr::LogicalPlan;
|
use datafusion_expr::LogicalPlan;
|
||||||
use datafusion_substrait::logical_plan::consumer::from_substrait_plan;
|
use datafusion_substrait::logical_plan::consumer::from_substrait_plan;
|
||||||
use datafusion_substrait::logical_plan::producer::to_substrait_plan;
|
use datafusion_substrait::logical_plan::producer::to_substrait_plan;
|
||||||
use datafusion_substrait::substrait::proto::Plan;
|
use datafusion_substrait::substrait::proto::Plan;
|
||||||
use prost::Message;
|
use prost::Message;
|
||||||
use session::context::QueryContextRef;
|
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
|
|
||||||
use crate::error::{
|
use crate::error::{DecodeDfPlanSnafu, DecodeRelSnafu, EncodeDfPlanSnafu, EncodeRelSnafu, Error};
|
||||||
DFInternalSnafu, DecodeDfPlanSnafu, DecodeRelSnafu, EncodeDfPlanSnafu, EncodeRelSnafu, Error,
|
use crate::{SerializerRegistry, SubstraitPlan};
|
||||||
};
|
|
||||||
use crate::extension_serializer::ExtensionSerializer;
|
|
||||||
use crate::SubstraitPlan;
|
|
||||||
|
|
||||||
pub struct DFLogicalSubstraitConvertor;
|
pub struct DFLogicalSubstraitConvertor;
|
||||||
|
|
||||||
@@ -49,15 +42,8 @@ impl SubstraitPlan for DFLogicalSubstraitConvertor {
|
|||||||
&self,
|
&self,
|
||||||
message: B,
|
message: B,
|
||||||
catalog_list: Arc<dyn CatalogProviderList>,
|
catalog_list: Arc<dyn CatalogProviderList>,
|
||||||
mut state: SessionState,
|
state: SessionState,
|
||||||
query_ctx: QueryContextRef,
|
|
||||||
) -> Result<Self::Plan, Self::Error> {
|
) -> Result<Self::Plan, Self::Error> {
|
||||||
// substrait decoder will look up the UDFs in SessionState, so we need to register them
|
|
||||||
for func in FUNCTION_REGISTRY.functions() {
|
|
||||||
let udf = Arc::new(create_udf(func, query_ctx.clone(), Default::default()).into());
|
|
||||||
state.register_udf(udf).context(DFInternalSnafu)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut context = SessionContext::new_with_state(state);
|
let mut context = SessionContext::new_with_state(state);
|
||||||
context.register_catalog_list(catalog_list);
|
context.register_catalog_list(catalog_list);
|
||||||
let plan = Plan::decode(message).context(DecodeRelSnafu)?;
|
let plan = Plan::decode(message).context(DecodeRelSnafu)?;
|
||||||
@@ -67,10 +53,13 @@ impl SubstraitPlan for DFLogicalSubstraitConvertor {
|
|||||||
Ok(df_plan)
|
Ok(df_plan)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn encode(&self, plan: &Self::Plan) -> Result<Bytes, Self::Error> {
|
fn encode(
|
||||||
|
&self,
|
||||||
|
plan: &Self::Plan,
|
||||||
|
serializer: impl SerializerRegistry + 'static,
|
||||||
|
) -> Result<Bytes, Self::Error> {
|
||||||
let mut buf = BytesMut::new();
|
let mut buf = BytesMut::new();
|
||||||
|
let substrait_plan = self.to_sub_plan(plan, serializer)?;
|
||||||
let substrait_plan = self.to_sub_plan(plan)?;
|
|
||||||
substrait_plan.encode(&mut buf).context(EncodeRelSnafu)?;
|
substrait_plan.encode(&mut buf).context(EncodeRelSnafu)?;
|
||||||
|
|
||||||
Ok(buf.freeze())
|
Ok(buf.freeze())
|
||||||
@@ -78,10 +67,14 @@ impl SubstraitPlan for DFLogicalSubstraitConvertor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl DFLogicalSubstraitConvertor {
|
impl DFLogicalSubstraitConvertor {
|
||||||
pub fn to_sub_plan(&self, plan: &LogicalPlan) -> Result<Box<Plan>, Error> {
|
pub fn to_sub_plan(
|
||||||
|
&self,
|
||||||
|
plan: &LogicalPlan,
|
||||||
|
serializer: impl SerializerRegistry + 'static,
|
||||||
|
) -> Result<Box<Plan>, Error> {
|
||||||
let session_state =
|
let session_state =
|
||||||
SessionState::new_with_config_rt(SessionConfig::new(), Arc::new(RuntimeEnv::default()))
|
SessionState::new_with_config_rt(SessionConfig::new(), Arc::new(RuntimeEnv::default()))
|
||||||
.with_serializer_registry(Arc::new(ExtensionSerializer));
|
.with_serializer_registry(Arc::new(serializer));
|
||||||
let context = SessionContext::new_with_state(session_state);
|
let context = SessionContext::new_with_state(session_state);
|
||||||
|
|
||||||
to_substrait_plan(plan, &context).context(EncodeDfPlanSnafu)
|
to_substrait_plan(plan, &context).context(EncodeDfPlanSnafu)
|
||||||
|
|||||||
@@ -18,7 +18,6 @@ use common_error::ext::{BoxedError, ErrorExt};
|
|||||||
use common_error::status_code::StatusCode;
|
use common_error::status_code::StatusCode;
|
||||||
use common_macro::stack_trace_debug;
|
use common_macro::stack_trace_debug;
|
||||||
use datafusion::error::DataFusionError;
|
use datafusion::error::DataFusionError;
|
||||||
use datatypes::prelude::ConcreteDataType;
|
|
||||||
use prost::{DecodeError, EncodeError};
|
use prost::{DecodeError, EncodeError};
|
||||||
use snafu::{Location, Snafu};
|
use snafu::{Location, Snafu};
|
||||||
|
|
||||||
@@ -26,34 +25,6 @@ use snafu::{Location, Snafu};
|
|||||||
#[snafu(visibility(pub))]
|
#[snafu(visibility(pub))]
|
||||||
#[stack_trace_debug]
|
#[stack_trace_debug]
|
||||||
pub enum Error {
|
pub enum Error {
|
||||||
#[snafu(display("Unsupported physical plan: {}", name))]
|
|
||||||
UnsupportedPlan {
|
|
||||||
name: String,
|
|
||||||
#[snafu(implicit)]
|
|
||||||
location: Location,
|
|
||||||
},
|
|
||||||
|
|
||||||
#[snafu(display("Unsupported expr: {}", name))]
|
|
||||||
UnsupportedExpr {
|
|
||||||
name: String,
|
|
||||||
#[snafu(implicit)]
|
|
||||||
location: Location,
|
|
||||||
},
|
|
||||||
|
|
||||||
#[snafu(display("Unsupported concrete type: {:?}", ty))]
|
|
||||||
UnsupportedConcreteType {
|
|
||||||
ty: ConcreteDataType,
|
|
||||||
#[snafu(implicit)]
|
|
||||||
location: Location,
|
|
||||||
},
|
|
||||||
|
|
||||||
#[snafu(display("Unsupported substrait type: {}", ty))]
|
|
||||||
UnsupportedSubstraitType {
|
|
||||||
ty: String,
|
|
||||||
#[snafu(implicit)]
|
|
||||||
location: Location,
|
|
||||||
},
|
|
||||||
|
|
||||||
#[snafu(display("Failed to decode substrait relation"))]
|
#[snafu(display("Failed to decode substrait relation"))]
|
||||||
DecodeRel {
|
DecodeRel {
|
||||||
#[snafu(source)]
|
#[snafu(source)]
|
||||||
@@ -70,33 +41,6 @@ pub enum Error {
|
|||||||
location: Location,
|
location: Location,
|
||||||
},
|
},
|
||||||
|
|
||||||
#[snafu(display("Input plan is empty"))]
|
|
||||||
EmptyPlan {
|
|
||||||
#[snafu(implicit)]
|
|
||||||
location: Location,
|
|
||||||
},
|
|
||||||
|
|
||||||
#[snafu(display("Input expression is empty"))]
|
|
||||||
EmptyExpr {
|
|
||||||
#[snafu(implicit)]
|
|
||||||
location: Location,
|
|
||||||
},
|
|
||||||
|
|
||||||
#[snafu(display("Missing required field in protobuf, field: {}, plan: {}", field, plan))]
|
|
||||||
MissingField {
|
|
||||||
field: String,
|
|
||||||
plan: String,
|
|
||||||
#[snafu(implicit)]
|
|
||||||
location: Location,
|
|
||||||
},
|
|
||||||
|
|
||||||
#[snafu(display("Invalid parameters: {}", reason))]
|
|
||||||
InvalidParameters {
|
|
||||||
reason: String,
|
|
||||||
#[snafu(implicit)]
|
|
||||||
location: Location,
|
|
||||||
},
|
|
||||||
|
|
||||||
#[snafu(display("Internal error from DataFusion"))]
|
#[snafu(display("Internal error from DataFusion"))]
|
||||||
DFInternal {
|
DFInternal {
|
||||||
#[snafu(source)]
|
#[snafu(source)]
|
||||||
@@ -118,35 +62,6 @@ pub enum Error {
|
|||||||
location: Location,
|
location: Location,
|
||||||
},
|
},
|
||||||
|
|
||||||
#[snafu(display(
|
|
||||||
"Schema from Substrait proto doesn't match with the schema in storage.
|
|
||||||
Substrait schema: {:?}
|
|
||||||
Storage schema: {:?}",
|
|
||||||
substrait_schema,
|
|
||||||
storage_schema
|
|
||||||
))]
|
|
||||||
SchemaNotMatch {
|
|
||||||
substrait_schema: datafusion::arrow::datatypes::SchemaRef,
|
|
||||||
storage_schema: datafusion::arrow::datatypes::SchemaRef,
|
|
||||||
#[snafu(implicit)]
|
|
||||||
location: Location,
|
|
||||||
},
|
|
||||||
|
|
||||||
#[snafu(display("Failed to convert DataFusion schema"))]
|
|
||||||
ConvertDfSchema {
|
|
||||||
#[snafu(implicit)]
|
|
||||||
location: Location,
|
|
||||||
source: datatypes::error::Error,
|
|
||||||
},
|
|
||||||
|
|
||||||
#[snafu(display("Unable to resolve table: {table_name}, error: "))]
|
|
||||||
ResolveTable {
|
|
||||||
table_name: String,
|
|
||||||
#[snafu(implicit)]
|
|
||||||
location: Location,
|
|
||||||
source: catalog::error::Error,
|
|
||||||
},
|
|
||||||
|
|
||||||
#[snafu(display("Failed to encode DataFusion plan"))]
|
#[snafu(display("Failed to encode DataFusion plan"))]
|
||||||
EncodeDfPlan {
|
EncodeDfPlan {
|
||||||
#[snafu(source)]
|
#[snafu(source)]
|
||||||
@@ -169,24 +84,13 @@ pub type Result<T> = std::result::Result<T, Error>;
|
|||||||
impl ErrorExt for Error {
|
impl ErrorExt for Error {
|
||||||
fn status_code(&self) -> StatusCode {
|
fn status_code(&self) -> StatusCode {
|
||||||
match self {
|
match self {
|
||||||
Error::UnsupportedConcreteType { .. }
|
Error::UnknownPlan { .. } | Error::EncodeRel { .. } | Error::DecodeRel { .. } => {
|
||||||
| Error::UnsupportedPlan { .. }
|
StatusCode::InvalidArguments
|
||||||
| Error::UnsupportedExpr { .. }
|
}
|
||||||
| Error::UnsupportedSubstraitType { .. } => StatusCode::Unsupported,
|
|
||||||
Error::UnknownPlan { .. }
|
|
||||||
| Error::EncodeRel { .. }
|
|
||||||
| Error::DecodeRel { .. }
|
|
||||||
| Error::EmptyPlan { .. }
|
|
||||||
| Error::EmptyExpr { .. }
|
|
||||||
| Error::MissingField { .. }
|
|
||||||
| Error::InvalidParameters { .. }
|
|
||||||
| Error::SchemaNotMatch { .. } => StatusCode::InvalidArguments,
|
|
||||||
Error::DFInternal { .. }
|
Error::DFInternal { .. }
|
||||||
| Error::Internal { .. }
|
| Error::Internal { .. }
|
||||||
| Error::EncodeDfPlan { .. }
|
| Error::EncodeDfPlan { .. }
|
||||||
| Error::DecodeDfPlan { .. } => StatusCode::Internal,
|
| Error::DecodeDfPlan { .. } => StatusCode::Internal,
|
||||||
Error::ConvertDfSchema { source, .. } => source.status_code(),
|
|
||||||
Error::ResolveTable { source, .. } => source.status_code(),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -67,7 +67,6 @@ impl SerializerRegistry for ExtensionSerializer {
|
|||||||
name if name == EmptyMetric::name() => Err(DataFusionError::Substrait(
|
name if name == EmptyMetric::name() => Err(DataFusionError::Substrait(
|
||||||
"EmptyMetric should not be serialized".to_string(),
|
"EmptyMetric should not be serialized".to_string(),
|
||||||
)),
|
)),
|
||||||
"MergeScan" => Ok(vec![]),
|
|
||||||
other => Err(DataFusionError::NotImplemented(format!(
|
other => Err(DataFusionError::NotImplemented(format!(
|
||||||
"Serizlize logical plan for {}",
|
"Serizlize logical plan for {}",
|
||||||
other
|
other
|
||||||
|
|||||||
@@ -23,11 +23,11 @@ use async_trait::async_trait;
|
|||||||
use bytes::{Buf, Bytes};
|
use bytes::{Buf, Bytes};
|
||||||
use datafusion::catalog::CatalogProviderList;
|
use datafusion::catalog::CatalogProviderList;
|
||||||
use datafusion::execution::context::SessionState;
|
use datafusion::execution::context::SessionState;
|
||||||
|
pub use datafusion::execution::registry::SerializerRegistry;
|
||||||
/// Re-export the Substrait module of datafusion,
|
/// Re-export the Substrait module of datafusion,
|
||||||
/// note this is a different version of the `substrait_proto` crate
|
/// note this is a different version of the `substrait_proto` crate
|
||||||
pub use datafusion_substrait::substrait as substrait_proto_df;
|
pub use datafusion_substrait::substrait as substrait_proto_df;
|
||||||
pub use datafusion_substrait::{logical_plan as df_logical_plan, variation_const};
|
pub use datafusion_substrait::{logical_plan as df_logical_plan, variation_const};
|
||||||
use session::context::QueryContextRef;
|
|
||||||
pub use substrait_proto;
|
pub use substrait_proto;
|
||||||
|
|
||||||
pub use crate::df_substrait::DFLogicalSubstraitConvertor;
|
pub use crate::df_substrait::DFLogicalSubstraitConvertor;
|
||||||
@@ -42,8 +42,11 @@ pub trait SubstraitPlan {
|
|||||||
message: B,
|
message: B,
|
||||||
catalog_list: Arc<dyn CatalogProviderList>,
|
catalog_list: Arc<dyn CatalogProviderList>,
|
||||||
state: SessionState,
|
state: SessionState,
|
||||||
query_ctx: QueryContextRef,
|
|
||||||
) -> Result<Self::Plan, Self::Error>;
|
) -> Result<Self::Plan, Self::Error>;
|
||||||
|
|
||||||
fn encode(&self, plan: &Self::Plan) -> Result<Bytes, Self::Error>;
|
fn encode(
|
||||||
|
&self,
|
||||||
|
plan: &Self::Plan,
|
||||||
|
serializer: impl SerializerRegistry + 'static,
|
||||||
|
) -> Result<Bytes, Self::Error>;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -57,7 +57,6 @@ servers.workspace = true
|
|||||||
session.workspace = true
|
session.workspace = true
|
||||||
snafu.workspace = true
|
snafu.workspace = true
|
||||||
store-api.workspace = true
|
store-api.workspace = true
|
||||||
substrait.workspace = true
|
|
||||||
table.workspace = true
|
table.workspace = true
|
||||||
tokio.workspace = true
|
tokio.workspace = true
|
||||||
toml.workspace = true
|
toml.workspace = true
|
||||||
|
|||||||
@@ -64,11 +64,18 @@ pub enum Error {
|
|||||||
source: query::error::Error,
|
source: query::error::Error,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
#[snafu(display("Failed to create plan decoder"))]
|
||||||
|
NewPlanDecoder {
|
||||||
|
#[snafu(implicit)]
|
||||||
|
location: Location,
|
||||||
|
source: query::error::Error,
|
||||||
|
},
|
||||||
|
|
||||||
#[snafu(display("Failed to decode logical plan"))]
|
#[snafu(display("Failed to decode logical plan"))]
|
||||||
DecodeLogicalPlan {
|
DecodeLogicalPlan {
|
||||||
#[snafu(implicit)]
|
#[snafu(implicit)]
|
||||||
location: Location,
|
location: Location,
|
||||||
source: substrait::error::Error,
|
source: common_query::error::Error,
|
||||||
},
|
},
|
||||||
|
|
||||||
#[snafu(display("Incorrect internal state: {}", state))]
|
#[snafu(display("Incorrect internal state: {}", state))]
|
||||||
@@ -388,7 +395,9 @@ impl ErrorExt for Error {
|
|||||||
fn status_code(&self) -> StatusCode {
|
fn status_code(&self) -> StatusCode {
|
||||||
use Error::*;
|
use Error::*;
|
||||||
match self {
|
match self {
|
||||||
ExecuteLogicalPlan { source, .. } => source.status_code(),
|
NewPlanDecoder { source, .. } | ExecuteLogicalPlan { source, .. } => {
|
||||||
|
source.status_code()
|
||||||
|
}
|
||||||
|
|
||||||
BuildRegionRequests { source, .. } => source.status_code(),
|
BuildRegionRequests { source, .. } => source.status_code(),
|
||||||
HandleHeartbeatResponse { source, .. } | GetMetadata { source, .. } => {
|
HandleHeartbeatResponse { source, .. } | GetMetadata { source, .. } => {
|
||||||
|
|||||||
@@ -41,19 +41,13 @@ pub struct RegionServerEventSender(pub(crate) UnboundedSender<RegionServerEvent>
|
|||||||
impl RegionServerEventListener for RegionServerEventSender {
|
impl RegionServerEventListener for RegionServerEventSender {
|
||||||
fn on_region_registered(&self, region_id: RegionId) {
|
fn on_region_registered(&self, region_id: RegionId) {
|
||||||
if let Err(e) = self.0.send(RegionServerEvent::Registered(region_id)) {
|
if let Err(e) = self.0.send(RegionServerEvent::Registered(region_id)) {
|
||||||
error!(
|
error!(e; "Failed to send registering region: {region_id} event");
|
||||||
"Failed to send registering region: {region_id} event, source: {}",
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn on_region_deregistered(&self, region_id: RegionId) {
|
fn on_region_deregistered(&self, region_id: RegionId) {
|
||||||
if let Err(e) = self.0.send(RegionServerEvent::Deregistered(region_id)) {
|
if let Err(e) = self.0.send(RegionServerEvent::Deregistered(region_id)) {
|
||||||
error!(
|
error!(e; "Failed to send deregistering region: {region_id} event");
|
||||||
"Failed to send deregistering region: {region_id} event, source: {}",
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -51,13 +51,13 @@ use store_api::metric_engine_consts::{
|
|||||||
use store_api::region_engine::{RegionEngineRef, RegionRole, SetReadonlyResponse};
|
use store_api::region_engine::{RegionEngineRef, RegionRole, SetReadonlyResponse};
|
||||||
use store_api::region_request::{AffectedRows, RegionCloseRequest, RegionRequest};
|
use store_api::region_request::{AffectedRows, RegionCloseRequest, RegionRequest};
|
||||||
use store_api::storage::RegionId;
|
use store_api::storage::RegionId;
|
||||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
|
||||||
use tonic::{Request, Response, Result as TonicResult};
|
use tonic::{Request, Response, Result as TonicResult};
|
||||||
|
|
||||||
use crate::error::{
|
use crate::error::{
|
||||||
self, BuildRegionRequestsSnafu, DecodeLogicalPlanSnafu, ExecuteLogicalPlanSnafu,
|
self, BuildRegionRequestsSnafu, DecodeLogicalPlanSnafu, ExecuteLogicalPlanSnafu,
|
||||||
FindLogicalRegionsSnafu, HandleRegionRequestSnafu, RegionEngineNotFoundSnafu,
|
FindLogicalRegionsSnafu, HandleRegionRequestSnafu, NewPlanDecoderSnafu,
|
||||||
RegionNotFoundSnafu, Result, StopRegionEngineSnafu, UnexpectedSnafu, UnsupportedOutputSnafu,
|
RegionEngineNotFoundSnafu, RegionNotFoundSnafu, Result, StopRegionEngineSnafu, UnexpectedSnafu,
|
||||||
|
UnsupportedOutputSnafu,
|
||||||
};
|
};
|
||||||
use crate::event_listener::RegionServerEventListenerRef;
|
use crate::event_listener::RegionServerEventListenerRef;
|
||||||
|
|
||||||
@@ -653,14 +653,13 @@ impl RegionServerInner {
|
|||||||
|
|
||||||
let catalog_list = Arc::new(DummyCatalogList::with_table_provider(table_provider));
|
let catalog_list = Arc::new(DummyCatalogList::with_table_provider(table_provider));
|
||||||
let query_engine_ctx = self.query_engine.engine_context(ctx.clone());
|
let query_engine_ctx = self.query_engine.engine_context(ctx.clone());
|
||||||
|
let plan_decoder = query_engine_ctx
|
||||||
|
.new_plan_decoder()
|
||||||
|
.context(NewPlanDecoderSnafu)?;
|
||||||
|
|
||||||
// decode substrait plan to logical plan and execute it
|
// decode substrait plan to logical plan and execute it
|
||||||
let logical_plan = DFLogicalSubstraitConvertor
|
let logical_plan = plan_decoder
|
||||||
.decode(
|
.decode(Bytes::from(plan), catalog_list, false)
|
||||||
Bytes::from(plan),
|
|
||||||
catalog_list,
|
|
||||||
query_engine_ctx.state().clone(),
|
|
||||||
ctx.clone(),
|
|
||||||
)
|
|
||||||
.await
|
.await
|
||||||
.context(DecodeLogicalPlanSnafu)?;
|
.context(DecodeLogicalPlanSnafu)?;
|
||||||
|
|
||||||
|
|||||||
@@ -20,6 +20,7 @@ mod gcs;
 mod oss;
 mod s3;

+use std::sync::Arc;
 use std::time::Duration;
 use std::{env, path};

@@ -28,7 +29,7 @@ use common_telemetry::info;
 use object_store::layers::{LruCacheLayer, RetryLayer};
 use object_store::services::Fs;
 use object_store::util::{join_dir, normalize_dir, with_instrument_layers};
-use object_store::{HttpClient, ObjectStore};
+use object_store::{HttpClient, ObjectStore, ObjectStoreBuilder};
 use snafu::prelude::*;

 use crate::config::{ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
@@ -106,13 +107,13 @@ async fn create_object_store_with_cache(
 if let Some(path) = cache_path {
 let atomic_temp_dir = join_dir(path, ".tmp/");
 clean_temp_dir(&atomic_temp_dir)?;
-let mut builder = Fs::default();
-builder.root(path).atomic_write_dir(&atomic_temp_dir);
-let cache_store = ObjectStore::new(builder)
-.context(error::InitBackendSnafu)?
-.finish();
+let cache_store = Fs::default()
+.root(path)
+.atomic_write_dir(&atomic_temp_dir)
+.build()
+.context(error::InitBackendSnafu)?;

-let cache_layer = LruCacheLayer::new(cache_store, cache_capacity.0 as usize)
+let cache_layer = LruCacheLayer::new(Arc::new(cache_store), cache_capacity.0 as usize)
 .await
 .context(error::InitBackendSnafu)?;

@@ -229,8 +229,9 @@ impl EngineInner {
 let res = FileRegion::create(region_id, request, &self.object_store).await;
 let region = res.inspect_err(|err| {
 error!(
-"Failed to create region, region_id: {}, err: {}",
-region_id, err
+err;
+"Failed to create region, region_id: {}",
+region_id
 );
 })?;
 self.regions.write().unwrap().insert(region_id, region);
@@ -259,8 +260,9 @@ impl EngineInner {
 let res = FileRegion::open(region_id, request, &self.object_store).await;
 let region = res.inspect_err(|err| {
 error!(
-"Failed to open region, region_id: {}, err: {}",
-region_id, err
+err;
+"Failed to open region, region_id: {}",
+region_id
 );
 })?;
 self.regions.write().unwrap().insert(region_id, region);
@@ -302,8 +304,9 @@ impl EngineInner {
 let res = FileRegion::drop(&region, &self.object_store).await;
 res.inspect_err(|err| {
 error!(
-"Failed to drop region, region_id: {}, err: {}",
-region_id, err
+err;
+"Failed to drop region, region_id: {}",
+region_id
 );
 })?;
 }
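The three hunks above make the same change to error logging: instead of formatting the error into the message string, the source error is passed to `error!` ahead of the message (`error!(err; "...")`), so it is recorded as a structured field rather than flattened text. GreptimeDB's logging macros sit on top of `tracing`; a rough plain-`tracing` equivalent of the two styles (illustrative only, not the project's macro) looks like:

    use tracing::error;

    fn log_failure(region_id: u64, err: &dyn std::error::Error) {
        // Old style: the error is flattened into the message text.
        error!("Failed to create region, region_id: {}, err: {}", region_id, err);
        // New style: the error travels as a structured field, the message stays stable.
        error!(error = %err, "Failed to create region, region_id: {}", region_id);
    }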
@@ -71,8 +71,7 @@ impl FileRegionManifest {
 let bs = object_store
 .read(path)
 .await
-.context(LoadRegionManifestSnafu { region_id })?
-.to_vec();
+.context(LoadRegionManifestSnafu { region_id })?;
 Self::decode(bs.as_slice())
 }

@@ -159,7 +159,7 @@ pub struct FlownodeManager {
 table_info_source: TableSource,
 frontend_invoker: RwLock<Option<Box<dyn FrontendInvoker + Send + Sync>>>,
 /// contains mapping from table name to global id, and table schema
-node_context: Mutex<FlownodeContext>,
+node_context: RwLock<FlownodeContext>,
 flow_err_collectors: RwLock<BTreeMap<FlowId, ErrCollector>>,
 src_send_buf_lens: RwLock<BTreeMap<TableId, watch::Receiver<usize>>>,
 tick_manager: FlowTickManager,
@@ -194,7 +194,7 @@ impl FlownodeManager {
 query_engine,
 table_info_source: srv_map,
 frontend_invoker: RwLock::new(None),
-node_context: Mutex::new(node_context),
+node_context: RwLock::new(node_context),
 flow_err_collectors: Default::default(),
 src_send_buf_lens: Default::default(),
 tick_manager,
@@ -298,7 +298,7 @@ impl FlownodeManager {
 } else {
 // TODO(discord9): condiser remove buggy auto create by schema

-let node_ctx = self.node_context.lock().await;
+let node_ctx = self.node_context.read().await;
 let gid: GlobalId = node_ctx
 .table_repr
 .get_by_name(&table_name)
@@ -462,7 +462,7 @@ impl FlownodeManager {
 let mut output = BTreeMap::new();
 for (name, sink_recv) in self
 .node_context
-.lock()
+.write()
 .await
 .sink_receiver
 .iter_mut()
@@ -542,11 +542,11 @@ impl FlownodeManager {
 }
 // first check how many inputs were sent
 let (flush_res, buf_len) = if blocking {
-let mut ctx = self.node_context.lock().await;
-(ctx.flush_all_sender(), ctx.get_send_buf_size())
+let ctx = self.node_context.read().await;
+(ctx.flush_all_sender().await, ctx.get_send_buf_size().await)
 } else {
-match self.node_context.try_lock() {
-Ok(mut ctx) => (ctx.flush_all_sender(), ctx.get_send_buf_size()),
+match self.node_context.try_read() {
+Ok(ctx) => (ctx.flush_all_sender().await, ctx.get_send_buf_size().await),
 Err(_) => return Ok(()),
 }
 };
@@ -580,7 +580,7 @@ impl FlownodeManager {
 rows.len()
 );
 let table_id = region_id.table_id();
-self.node_context.lock().await.send(table_id, rows)?;
+self.node_context.read().await.send(table_id, rows).await?;
 // TODO(discord9): put it in a background task?
 // self.run_available(false).await?;
 Ok(())
@@ -628,7 +628,7 @@ impl FlownodeManager {
 }
 }

-let mut node_ctx = self.node_context.lock().await;
+let mut node_ctx = self.node_context.write().await;
 // assign global id to source and sink table
 for source in source_table_ids {
 node_ctx
@@ -116,7 +116,7 @@ impl Flownode for FlownodeManager {
 let now = self.tick_manager.tick();

 let fetch_order = {
-let ctx = self.node_context.lock().await;
+let ctx = self.node_context.read().await;
 let table_col_names = ctx
 .table_repr
 .get_by_table_id(&table_id)
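The hunks above swap the `Mutex` around `FlownodeContext` for a `tokio::sync::RwLock`: read-mostly paths (lookups, sends, flushes) now take a shared guard and can proceed concurrently, while only paths that actually mutate the context take the exclusive guard. A minimal sketch of the locking pattern with a simplified stand-in for the context (names here are illustrative, not the project's types):

    use std::collections::BTreeMap;
    use tokio::sync::RwLock;

    struct Context {
        table_repr: BTreeMap<String, u64>,
    }

    struct Manager {
        node_context: RwLock<Context>,
    }

    impl Manager {
        // Readers take a shared guard; many of these can be held at once.
        async fn lookup(&self, name: &str) -> Option<u64> {
            self.node_context.read().await.table_repr.get(name).copied()
        }

        // Only mutation takes the exclusive guard.
        async fn register(&self, name: String, id: u64) {
            self.node_context.write().await.table_repr.insert(name, id);
        }

        // Non-blocking attempt, mirroring the `try_read()` branch in the diff.
        fn try_peek(&self, name: &str) -> Option<u64> {
            self.node_context
                .try_read()
                .ok()
                .and_then(|ctx| ctx.table_repr.get(name).copied())
        }
    }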
@@ -21,7 +21,7 @@ use common_telemetry::debug;
 use session::context::QueryContext;
 use snafu::{OptionExt, ResultExt};
 use table::metadata::TableId;
-use tokio::sync::{broadcast, mpsc};
+use tokio::sync::{broadcast, mpsc, RwLock};

 use crate::adapter::error::{Error, EvalSnafu, TableNotFoundSnafu};
 use crate::adapter::{FlowId, TableName, TableSource};
@@ -65,7 +65,7 @@ pub struct FlownodeContext {
 #[derive(Debug)]
 pub struct SourceSender {
 sender: broadcast::Sender<DiffRow>,
-send_buf: VecDeque<DiffRow>,
+send_buf: RwLock<VecDeque<DiffRow>>,
 }

 impl Default for SourceSender {
@@ -78,6 +78,7 @@ impl Default for SourceSender {
 }
 }

+// TODO: make all send operation immut
 impl SourceSender {
 pub fn get_receiver(&self) -> broadcast::Receiver<DiffRow> {
 self.sender.subscribe()
@@ -85,15 +86,16 @@ impl SourceSender {

 /// send as many as possible rows from send buf
 /// until send buf is empty or broadchannel is full
-pub fn try_send_all(&mut self) -> Result<usize, Error> {
+pub async fn try_send_all(&self) -> Result<usize, Error> {
 let mut row_cnt = 0;
 loop {
+let mut send_buf = self.send_buf.write().await;
 // if inner sender channel is empty or send buf is empty, there
 // is nothing to do for now, just break
-if self.sender.len() >= BROADCAST_CAP || self.send_buf.is_empty() {
+if self.sender.len() >= BROADCAST_CAP || send_buf.is_empty() {
 break;
 }
-if let Some(row) = self.send_buf.pop_front() {
+if let Some(row) = send_buf.pop_front() {
 self.sender
 .send(row)
 .map_err(|err| {
@@ -108,17 +110,20 @@ impl SourceSender {
 }
 if row_cnt > 0 {
 debug!("Send {} rows", row_cnt);
-debug!("Remaining Send buf.len() = {}", self.send_buf.len());
+debug!(
+"Remaining Send buf.len() = {}",
+self.send_buf.read().await.len()
+);
 }

 Ok(row_cnt)
 }

 /// return number of rows it actual send(including what's in the buffer)
-pub fn send_rows(&mut self, rows: Vec<DiffRow>) -> Result<usize, Error> {
-self.send_buf.extend(rows);
+pub async fn send_rows(&self, rows: Vec<DiffRow>) -> Result<usize, Error> {
+self.send_buf.write().await.extend(rows);

-let row_cnt = self.try_send_all()?;
+let row_cnt = self.try_send_all().await?;

 Ok(row_cnt)
 }
@@ -128,30 +133,35 @@ impl FlownodeContext {
 /// return number of rows it actual send(including what's in the buffer)
 ///
 /// TODO(discord9): make this concurrent
-pub fn send(&mut self, table_id: TableId, rows: Vec<DiffRow>) -> Result<usize, Error> {
+pub async fn send(&self, table_id: TableId, rows: Vec<DiffRow>) -> Result<usize, Error> {
 let sender = self
 .source_sender
-.get_mut(&table_id)
+.get(&table_id)
 .with_context(|| TableNotFoundSnafu {
 name: table_id.to_string(),
 })?;
 // debug!("FlownodeContext::send: trying to send {} rows", rows.len());
-sender.send_rows(rows)
+sender.send_rows(rows).await
 }

 /// flush all sender's buf
 ///
 /// return numbers being sent
-pub fn flush_all_sender(&mut self) -> Result<usize, Error> {
-self.source_sender
-.iter_mut()
-.map(|(_table_id, src_sender)| src_sender.try_send_all())
-.try_fold(0, |acc, x| x.map(|x| x + acc))
+pub async fn flush_all_sender(&self) -> Result<usize, Error> {
+let mut sum = 0;
+for sender in self.source_sender.values() {
+sender.try_send_all().await.inspect(|x| sum += x)?;
+}
+Ok(sum)
 }

 /// Return the sum number of rows in all send buf
-pub fn get_send_buf_size(&self) -> usize {
-self.source_sender.values().map(|v| v.send_buf.len()).sum()
+pub async fn get_send_buf_size(&self) -> usize {
+let mut sum = 0;
+for sender in self.source_sender.values() {
+sum += sender.send_buf.read().await.len();
+}
+sum
 }
 }

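The `SourceSender` changes are the interior-mutability half of the same refactor: the send buffer moves behind its own `RwLock`, so sending no longer needs `&mut self`, which is what lets `FlownodeContext::send` run under a shared read guard. A compact sketch of that pattern with a simplified row type (illustrative only):

    use std::collections::VecDeque;
    use tokio::sync::RwLock;

    struct Sender {
        // The queue is locked independently, so the owning struct can be shared.
        send_buf: RwLock<VecDeque<u64>>,
    }

    impl Sender {
        // Note: &self, not &mut self; callers can hold the sender behind a shared guard.
        async fn send_rows(&self, rows: Vec<u64>) -> usize {
            let mut buf = self.send_buf.write().await;
            buf.extend(rows);
            buf.len()
        }

        async fn buffered(&self) -> usize {
            self.send_buf.read().await.len()
        }
    }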
@@ -285,8 +285,8 @@ impl<'s> Worker<'s> {
 Ok(Some((id, resp))) => {
 if let Err(err) = self.itc_server.blocking_lock().resp(id, resp) {
 common_telemetry::error!(
-"Worker's itc server has been closed unexpectedly, shutting down worker: {}",
-err
+err;
+"Worker's itc server has been closed unexpectedly, shutting down worker"
 );
 break;
 };
@@ -23,6 +23,7 @@ use literal::{from_substrait_literal, from_substrait_type};
 use prost::Message;
 use query::parser::QueryLanguageParser;
 use query::plan::LogicalPlan;
+use query::query_engine::DefaultSerializer;
 use query::QueryEngine;
 use session::context::QueryContext;
 use snafu::{OptionExt, ResultExt};
@@ -121,7 +122,7 @@ pub async fn sql_to_flow_plan(
 .context(ExternalSnafu)?;
 let LogicalPlan::DfPlan(plan) = plan;
 let sub_plan = DFLogicalSubstraitConvertor {}
-.to_sub_plan(&plan)
+.to_sub_plan(&plan, DefaultSerializer)
 .map_err(BoxedError::new)
 .context(ExternalSnafu)?;

@@ -294,7 +295,9 @@ mod test {
 let LogicalPlan::DfPlan(plan) = plan;

 // encode then decode so to rely on the impl of conversion from logical plan to substrait plan
-let bytes = DFLogicalSubstraitConvertor {}.encode(&plan).unwrap();
+let bytes = DFLogicalSubstraitConvertor {}
+.encode(&plan, DefaultSerializer)
+.unwrap();

 proto::Plan::decode(bytes).unwrap()
 }
@@ -18,7 +18,6 @@ use api::v1::query_request::Query;
 use api::v1::{DeleteRequests, DropFlowExpr, InsertRequests, RowDeleteRequests, RowInsertRequests};
 use async_trait::async_trait;
 use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
-use common_meta::table_name::TableName;
 use common_query::Output;
 use common_telemetry::tracing;
 use query::parser::PromQuery;
@@ -27,6 +26,7 @@ use servers::query_handler::grpc::GrpcQueryHandler;
 use servers::query_handler::sql::SqlQueryHandler;
 use session::context::QueryContextRef;
 use snafu::{ensure, OptionExt, ResultExt};
+use table::table_name::TableName;

 use crate::error::{
 Error, IncompleteGrpcRequestSnafu, NotSupportedSnafu, PermissionSnafu, Result,
@@ -72,12 +72,12 @@ mod python {
 use arc_swap::ArcSwap;
 use catalog::RegisterSystemTableRequest;
 use common_error::ext::BoxedError;
-use common_meta::table_name::TableName;
 use common_telemetry::{error, info};
 use script::manager::ScriptManager;
 use servers::query_handler::grpc::GrpcQueryHandler;
 use session::context::QueryContext;
 use snafu::{OptionExt, ResultExt};
+use table::table_name::TableName;

 use super::*;
 use crate::error::{CatalogSnafu, TableNotFoundSnafu};
@@ -256,7 +256,7 @@ impl ExternalSorter {
 IntermediateWriter::new(writer).write_all(values, bitmap_leading_zeros as _).await.inspect(|_|
 debug!("Dumped {entries} entries ({memory_usage} bytes) to intermediate file {file_id} for index {index_name}")
 ).inspect_err(|e|
-error!("Failed to dump {entries} entries to intermediate file {file_id} for index {index_name}. Error: {e}")
+error!(e; "Failed to dump {entries} entries to intermediate file {file_id} for index {index_name}")
 )
 }

@@ -29,7 +29,6 @@ use common_meta::key::datanode_table::DatanodeTableKey;
 use common_meta::key::{TableMetadataManagerRef, MAINTENANCE_KEY};
 use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
 use common_meta::lock_key::{CatalogLock, RegionLock, SchemaLock, TableLock};
-use common_meta::table_name::TableName;
 use common_meta::{ClusterId, RegionIdent};
 use common_procedure::error::{
 Error as ProcedureError, FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu,
@@ -44,6 +43,7 @@ use serde::{Deserialize, Serialize};
 use snafu::ResultExt;
 use store_api::storage::{RegionId, RegionNumber};
 use table::metadata::TableId;
+use table::table_name::TableName;

 use crate::error::{
 self, KvBackendSnafu, RegisterProcedureLoaderSnafu, Result, TableMetadataManagerSnafu,
@@ -22,12 +22,12 @@ use common_meta::key::table_info::TableInfoValue;
 use common_meta::key::table_route::TableRouteValue;
 use common_meta::peer::Peer;
 use common_meta::rpc::router::RegionRoute;
-use common_meta::table_name::TableName;
 use common_meta::ClusterId;
 use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
 use common_telemetry::{error, info};
 use snafu::{ensure, OptionExt, ResultExt};
 use store_api::storage::RegionId;
+use table::table_name::TableName;

 use crate::error::{self, Result};
 use crate::procedure::region_migration::{
@@ -171,9 +171,10 @@ impl MetricEngineInner {

 // check if the logical region already exist
 if self
-.metadata_region
-.is_logical_region_exists(metadata_region_id, logical_region_id)
-.await?
+.state
+.read()
+.unwrap()
+.is_logical_region_exists(logical_region_id)
 {
 info!("Create a existing logical region {logical_region_id}. Skipped");
 return Ok(data_region_id);
@@ -104,7 +104,7 @@ impl MetricEngineInner {
 // check if the region exists
 let data_region_id = to_data_region_id(physical_region_id);
 let state = self.state.read().unwrap();
-if !state.is_logical_region_exist(logical_region_id) {
+if !state.is_logical_region_exists(logical_region_id) {
 error!("Trying to write to an nonexistent region {logical_region_id}");
 return LogicalRegionNotFoundSnafu {
 region_id: logical_region_id,
@@ -149,7 +149,7 @@ impl MetricEngineState {
 Ok(exist)
 }

-pub fn is_logical_region_exist(&self, logical_region_id: RegionId) -> bool {
+pub fn is_logical_region_exists(&self, logical_region_id: RegionId) -> bool {
 self.logical_regions().contains_key(&logical_region_id)
 }
 }
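The hunks above replace an async existence check against the metadata region with a lookup in the in-memory `MetricEngineState`, so the hot path no longer awaits a storage read. A minimal sketch of that kind of check, with simplified types (names are illustrative):

    use std::collections::HashMap;
    use std::sync::RwLock;

    type RegionId = u64;

    struct EngineState {
        logical_regions: HashMap<RegionId, ()>,
    }

    impl EngineState {
        fn is_logical_region_exists(&self, id: RegionId) -> bool {
            self.logical_regions.contains_key(&id)
        }
    }

    struct EngineInner {
        state: RwLock<EngineState>,
    }

    impl EngineInner {
        // Synchronous map lookup: no await, no storage round trip.
        fn logical_region_exists(&self, id: RegionId) -> bool {
            self.state.read().unwrap().is_logical_region_exists(id)
        }
    }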
@@ -139,17 +139,6 @@ impl MetadataRegion {
 Ok(())
 }

-/// Check if the given logical region exists.
-pub async fn is_logical_region_exists(
-&self,
-physical_region_id: RegionId,
-logical_region_id: RegionId,
-) -> Result<bool> {
-let region_id = utils::to_metadata_region_id(physical_region_id);
-let region_key = Self::concat_region_key(logical_region_id);
-self.exists(region_id, &region_key).await
-}
-
 /// Check if the given column exists. Return the semantic type if exists.
 pub async fn column_semantic_type(
 &self,
@@ -669,10 +658,6 @@ mod test {
 .add_logical_region(physical_region_id, logical_region_id)
 .await
 .unwrap();
-assert!(metadata_region
-.is_logical_region_exists(physical_region_id, logical_region_id)
-.await
-.unwrap());

 // add it again
 assert!(metadata_region
19 src/mito2/src/cache/file_cache.rs vendored
@@ -112,10 +112,6 @@ impl FileCache {
 self.memory_index.insert(key, value).await;
 }

-pub(crate) async fn get(&self, key: IndexKey) -> Option<IndexValue> {
-self.memory_index.get(&key).await
-}
-
 /// Reads a file from the cache.
 pub(crate) async fn reader(&self, key: IndexKey) -> Option<Reader> {
 // We must use `get()` to update the estimator of the cache.
@@ -376,6 +372,7 @@ fn parse_index_key(name: &str) -> Option<IndexKey> {
 #[cfg(test)]
 mod tests {
 use common_test_util::temp_dir::create_temp_dir;
+use futures::AsyncReadExt;
 use object_store::services::Fs;

 use super::*;
@@ -454,9 +451,10 @@ mod tests {
 .await;

 // Read file content.
-let reader = cache.reader(key).await.unwrap();
-let buf = reader.read(..).await.unwrap().to_vec();
-assert_eq!("hello", String::from_utf8(buf).unwrap());
+let mut reader = cache.reader(key).await.unwrap();
+let mut buf = String::new();
+reader.read_to_string(&mut buf).await.unwrap();
+assert_eq!("hello", buf);

 // Get weighted size.
 cache.memory_index.run_pending_tasks().await;
@@ -551,9 +549,10 @@ mod tests {

 for (i, file_id) in file_ids.iter().enumerate() {
 let key = IndexKey::new(region_id, *file_id, file_type);
-let reader = cache.reader(key).await.unwrap();
-let buf = reader.read(..).await.unwrap().to_vec();
-assert_eq!(i.to_string(), String::from_utf8(buf).unwrap());
+let mut reader = cache.reader(key).await.unwrap();
+let mut buf = String::new();
+reader.read_to_string(&mut buf).await.unwrap();
+assert_eq!(i.to_string(), buf);
 }
 }

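The test updates above follow from the cache reader now being consumed as a futures `AsyncRead`, so content is pulled out with `AsyncReadExt::read_to_string` instead of a ranged `read(..)`. A small self-contained example of the same read pattern, using an in-memory cursor as a stand-in for the cache reader:

    use futures::io::{AsyncReadExt, Cursor};

    async fn read_all() -> std::io::Result<String> {
        // Cursor<Vec<u8>> implements futures::io::AsyncRead, like the cache reader in the tests.
        let mut reader = Cursor::new(b"hello".to_vec());
        let mut buf = String::new();
        reader.read_to_string(&mut buf).await?;
        Ok(buf)
    }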
25 src/mito2/src/cache/write_cache.rs vendored
@@ -19,7 +19,6 @@ use std::time::Duration;

 use common_base::readable_size::ReadableSize;
 use common_telemetry::{debug, info};
-use futures::AsyncWriteExt;
 use object_store::manager::ObjectStoreManagerRef;
 use object_store::ObjectStore;
 use snafu::ResultExt;
@@ -176,27 +175,19 @@ impl WriteCache {
 }])
 .start_timer();

-let cached_value = self
-.file_cache
-.local_store()
-.stat(&cache_path)
-.await
-.context(error::OpenDalSnafu)?;
 let reader = self
 .file_cache
 .local_store()
 .reader(&cache_path)
 .await
-.context(error::OpenDalSnafu)?
-.into_futures_async_read(0..cached_value.content_length());
+.context(error::OpenDalSnafu)?;

 let mut writer = remote_store
 .writer_with(upload_path)
-.chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
+.buffer(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
 .concurrent(DEFAULT_WRITE_CONCURRENCY)
 .await
-.context(error::OpenDalSnafu)?
-.into_futures_async_write();
+.context(error::OpenDalSnafu)?;

 let bytes_written =
 futures::io::copy(reader, &mut writer)
@@ -208,11 +199,7 @@ impl WriteCache {
 })?;

 // Must close to upload all data.
-writer.close().await.context(error::UploadSnafu {
-region_id,
-file_id,
-file_type,
-})?;
+writer.close().await.context(error::OpenDalSnafu)?;

 UPLOAD_BYTES_TOTAL.inc_by(bytes_written);

@@ -328,7 +315,7 @@ mod tests {
 .read(&write_cache.file_cache.cache_file_path(key))
 .await
 .unwrap();
-assert_eq!(remote_data.to_vec(), cache_data.to_vec());
+assert_eq!(remote_data, cache_data);

 // Check write cache contains the index key
 let index_key = IndexKey::new(region_id, file_id, FileType::Puffin);
@@ -339,7 +326,7 @@ mod tests {
 .read(&write_cache.file_cache.cache_file_path(index_key))
 .await
 .unwrap();
-assert_eq!(remote_index_data.to_vec(), cache_index_data.to_vec());
+assert_eq!(remote_index_data, cache_index_data);
 }

 #[tokio::test]
@@ -331,6 +331,13 @@ pub enum Error {
 location: Location,
 },

+#[snafu(display("Invalid wal read request, {}", reason))]
+InvalidWalReadRequest {
+reason: String,
+#[snafu(implicit)]
+location: Location,
+},
+
 #[snafu(display("Failed to convert array to vector"))]
 ConvertVector {
 #[snafu(implicit)]
@@ -787,7 +794,8 @@ impl ErrorExt for Error {
 | ConvertColumnDataType { .. }
 | ColumnNotFound { .. }
 | InvalidMetadata { .. }
-| InvalidRegionOptions { .. } => StatusCode::InvalidArguments,
+| InvalidRegionOptions { .. }
+| InvalidWalReadRequest { .. } => StatusCode::InvalidArguments,

 InvalidRegionRequestSchemaVersion { .. } => StatusCode::RequestOutdated,

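With the new variant above, snafu generates an `InvalidWalReadRequestSnafu` context selector, and the `#[snafu(implicit)]` attribute captures the `Location` automatically at the failure site. A minimal standalone sketch of how such a variant is typically raised (the surrounding types are simplified, not mito2's actual error enum):

    use snafu::{ensure, Location, Snafu};

    #[derive(Debug, Snafu)]
    pub enum Error {
        #[snafu(display("Invalid wal read request, {}", reason))]
        InvalidWalReadRequest {
            reason: String,
            #[snafu(implicit)]
            location: Location,
        },
    }

    fn check_range(start: u64, end: u64) -> Result<(), Error> {
        // `ensure!` builds the variant through the generated context selector.
        ensure!(
            start <= end,
            InvalidWalReadRequestSnafu {
                reason: format!("invalid range: start {start} > end {end}"),
            }
        );
        Ok(())
    }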
@@ -497,7 +497,7 @@ impl ManifestObjectStore {
 }
 };

-let checkpoint_metadata = CheckpointMetadata::decode(&last_checkpoint_data.to_vec())?;
+let checkpoint_metadata = CheckpointMetadata::decode(&last_checkpoint_data)?;

 debug!(
 "Load checkpoint in path: {}, metadata: {:?}",
@@ -509,11 +509,7 @@ impl ManifestObjectStore {

 #[cfg(test)]
 pub async fn read_file(&self, path: &str) -> Result<Vec<u8>> {
-self.object_store
-.read(path)
-.await
-.context(OpenDalSnafu)
-.map(|v| v.to_vec())
+self.object_store.read(path).await.context(OpenDalSnafu)
 }

 #[cfg(test)]
@@ -121,17 +121,9 @@ impl SstIndexApplier {
 return Ok(None);
 };

-let Some(indexed_value) = file_cache
-.get(IndexKey::new(self.region_id, file_id, FileType::Puffin))
-.await
-else {
-return Ok(None);
-};
-
 Ok(file_cache
 .reader(IndexKey::new(self.region_id, file_id, FileType::Puffin))
 .await
-.map(|v| v.into_futures_async_read(0..indexed_value.file_size as u64))
 .map(PuffinFileReader::new))
 }

@@ -198,13 +190,7 @@ mod tests {
 let region_dir = "region_dir".to_string();
 let path = location::index_file_path(&region_dir, file_id);

-let mut puffin_writer = PuffinFileWriter::new(
-object_store
-.writer(&path)
-.await
-.unwrap()
-.into_futures_async_write(),
-);
+let mut puffin_writer = PuffinFileWriter::new(object_store.writer(&path).await.unwrap());
 puffin_writer
 .add_blob(Blob {
 blob_type: INDEX_BLOB_TYPE.to_string(),
@@ -250,13 +236,7 @@ mod tests {
 let region_dir = "region_dir".to_string();
 let path = location::index_file_path(&region_dir, file_id);

-let mut puffin_writer = PuffinFileWriter::new(
-object_store
-.writer(&path)
-.await
-.unwrap()
-.into_futures_async_write(),
-);
+let mut puffin_writer = PuffinFileWriter::new(object_store.writer(&path).await.unwrap());
 puffin_writer
 .add_blob(Blob {
 blob_type: "invalid_blob_type".to_string(),
@@ -26,8 +26,6 @@ use crate::error::{OpenDalSnafu, Result};

 /// A wrapper around [`ObjectStore`] that adds instrumentation for monitoring
 /// metrics such as bytes read, bytes written, and the number of seek operations.
-///
-/// TODO: Consider refactor InstrumentedStore to use async in trait instead of AsyncRead.
 #[derive(Clone)]
 pub(crate) struct InstrumentedStore {
 /// The underlying object store.
@@ -60,14 +58,8 @@ impl InstrumentedStore {
 read_byte_count: &'a IntCounter,
 read_count: &'a IntCounter,
 seek_count: &'a IntCounter,
-) -> Result<InstrumentedAsyncRead<'a, object_store::FuturesAsyncReader>> {
-let meta = self.object_store.stat(path).await.context(OpenDalSnafu)?;
-let reader = self
-.object_store
-.reader(path)
-.await
-.context(OpenDalSnafu)?
-.into_futures_async_read(0..meta.content_length());
+) -> Result<InstrumentedAsyncRead<'a, object_store::Reader>> {
+let reader = self.object_store.reader(path).await.context(OpenDalSnafu)?;
 Ok(InstrumentedAsyncRead::new(
 reader,
 read_byte_count,
@@ -85,21 +77,15 @@ impl InstrumentedStore {
 write_byte_count: &'a IntCounter,
 write_count: &'a IntCounter,
 flush_count: &'a IntCounter,
-) -> Result<InstrumentedAsyncWrite<'a, object_store::FuturesAsyncWriter>> {
+) -> Result<InstrumentedAsyncWrite<'a, object_store::Writer>> {
 let writer = match self.write_buffer_size {
 Some(size) => self
 .object_store
 .writer_with(path)
-.chunk(size)
+.buffer(size)
 .await
-.context(OpenDalSnafu)?
-.into_futures_async_write(),
-None => self
-.object_store
-.writer(path)
-.await
-.context(OpenDalSnafu)?
-.into_futures_async_write(),
+.context(OpenDalSnafu)?,
+None => self.object_store.writer(path).await.context(OpenDalSnafu)?,
 };
 Ok(InstrumentedAsyncWrite::new(
 writer,
Some files were not shown because too many files have changed in this diff.