Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-06 21:32:58 +00:00

Compare commits: 31 commits, v0.1.0-alp ... v0.1.0-alp
| SHA1 |
|---|
| 0b3f955ca7 |
| 4b58a8a18d |
| bd377ef329 |
| df751c38b4 |
| f6e871708a |
| 819c990a89 |
| a8b4e8d933 |
| 710e2ed133 |
| 81eab74b90 |
| 8f67d8ca93 |
| 4cc3ac37d5 |
| b48c851b96 |
| fdd17c6eeb |
| 51641db39e |
| 98ef74bff4 |
| f42acc90c2 |
| 2df8143ad5 |
| fb2e0c7cf3 |
| 390e9095f6 |
| bcd44b90c1 |
| c6f2db8ae0 |
| e17d5a1c41 |
| 23092a5208 |
| 4bbad6ab1e |
| 6833b405d9 |
| aaaf24143d |
| 9161796dfa |
| 68b231987c |
| 6e9964ac97 |
| 6afd79cab8 |
| 4e88a01638 |
6 .github/workflows/develop.yml (vendored)

@@ -31,8 +31,8 @@ jobs:
name: Spell Check with Typos
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: crate-ci/typos@v1.0.4
- uses: actions/checkout@v3
- uses: crate-ci/typos@v1.13.10

check:
name: Check

@@ -223,5 +223,5 @@ jobs:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./lcov.info
flags: rust
fail_ci_if_error: true
fail_ci_if_error: false
verbose: true
2 .github/workflows/license.yaml (vendored)

@@ -13,4 +13,4 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Check License Header
uses: apache/skywalking-eyes/header@main
uses: apache/skywalking-eyes/header@df70871af1a8109c9a5b1dc824faaf65246c5236
450 Cargo.lock (generated)

@@ -143,7 +143,7 @@ dependencies = [
"common-time",
"datatypes",
"greptime-proto",
"prost 0.11.6",
"prost",
"snafu",
"tonic",
"tonic-build",

@@ -301,9 +301,9 @@ dependencies = [
"bytes",
"futures",
"proc-macro2",
"prost 0.11.6",
"prost-build 0.11.3",
"prost-derive 0.11.6",
"prost",
"prost-build",
"prost-derive",
"tokio",
"tonic",
"tonic-build",

@@ -653,6 +653,18 @@ dependencies = [
"tokio",
]

[[package]]
name = "backon"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f34fac4d7cdaefa2deded0eda2d5d59dbfd43370ff3f856209e72340ae84c294"
dependencies = [
"futures",
"pin-project",
"rand 0.8.5",
"tokio",
]

[[package]]
name = "backtrace"
version = "0.3.67"

@@ -729,25 +741,6 @@ dependencies = [
"serde",
]

[[package]]
name = "bincode"
version = "2.0.0-rc.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bb50c5a2ef4b9b1e7ae73e3a73b52ea24b20312d629f9c4df28260b7ad2c3c4"
dependencies = [
"bincode_derive",
"serde",
]

[[package]]
name = "bincode_derive"
version = "2.0.0-rc.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a45a23389446d2dd25dc8e73a7a3b3c43522b630cac068927f0649d43d719d2"
dependencies = [
"virtue",
]

[[package]]
name = "bindgen"
version = "0.59.2"
@@ -1318,12 +1311,11 @@ dependencies = [
"enum_dispatch",
"futures-util",
"parking_lot",
"prost 0.11.6",
"prost 0.9.0",
"prost",
"rand 0.8.5",
"snafu",
"substrait 0.1.0",
"substrait 0.2.0",
"substrait 0.4.0",
"tokio",
"tonic",
"tracing",

@@ -1357,14 +1349,21 @@ dependencies = [
"anymap",
"build-data",
"clap 3.2.23",
"client",
"common-base",
"common-error",
"common-query",
"common-recordbatch",
"common-telemetry",
"datanode",
"either",
"frontend",
"futures",
"meta-client",
"meta-srv",
"nu-ansi-term",
"rexpect",
"rustyline",
"serde",
"servers",
"snafu",

@@ -1394,6 +1393,12 @@ dependencies = [
"unicode-width",
]

[[package]]
name = "comma"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"

[[package]]
name = "common-base"
version = "0.1.0"

@@ -1488,7 +1493,7 @@ dependencies = [
"datatypes",
"flatbuffers",
"futures",
"prost 0.11.6",
"prost",
"rand 0.8.5",
"snafu",
"tokio",

@@ -1644,8 +1649,8 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e57ff02e8ad8e06ab9731d5dc72dc23bef9200778eae1a89d555d8c42e5d4a86"
dependencies = [
"prost 0.11.6",
"prost-types 0.11.6",
"prost",
"prost-types",
"tonic",
"tracing-core",
]

@@ -1662,7 +1667,7 @@ dependencies = [
"futures",
"hdrhistogram",
"humantime",
"prost-types 0.11.6",
"prost-types",
"serde",
"serde_json",
"thread_local",
@@ -2172,7 +2177,7 @@ dependencies = [
"axum",
"axum-macros",
"axum-test-helper",
"backon",
"backon 0.2.0",
"catalog",
"client",
"common-base",

@@ -2180,6 +2185,7 @@ dependencies = [
"common-error",
"common-grpc",
"common-grpc-expr",
"common-procedure",
"common-query",
"common-recordbatch",
"common-runtime",

@@ -2199,7 +2205,7 @@ dependencies = [
"mito",
"object-store",
"pin-project",
"prost 0.11.6",
"prost",
"query",
"script",
"serde",

@@ -2212,9 +2218,11 @@ dependencies = [
"store-api",
"substrait 0.1.0",
"table",
"table-procedure",
"tempdir",
"tokio",
"tokio-stream",
"toml",
"tonic",
"tower",
"tower-http",

@@ -2248,6 +2256,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de"
dependencies = [
"const-oid",
"pem-rfc7468",
"zeroize",
]

[[package]]

@@ -2336,6 +2346,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f"
dependencies = [
"block-buffer",
"const-oid",
"crypto-common",
"subtle",
]

@@ -2538,7 +2549,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1259da3b15ec7e54bd7203adb2c4335adb9ca1d47b56220d650e52c247e824a"
dependencies = [
"http",
"prost 0.11.6",
"prost",
"tokio",
"tokio-stream",
"tonic",

@@ -2697,7 +2708,7 @@ dependencies = [
"moka",
"openmetrics-parser",
"partition",
"prost 0.11.6",
"prost",
"query",
"rustls",
"serde",

@@ -2712,6 +2723,7 @@ dependencies = [
"table",
"tempdir",
"tokio",
"toml",
"tonic",
"tower",
]

@@ -2975,9 +2987,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=3e6349be127b65a8b42a38cda9d527ec423ca77d#3e6349be127b65a8b42a38cda9d527ec423ca77d"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=1599ae2a0d1d8f42ee23ed26e4ad7a7b34134c60#1599ae2a0d1d8f42ee23ed26e4ad7a7b34134c60"
dependencies = [
"prost 0.11.6",
"prost",
"tonic",
"tonic-build",
]
@@ -3111,6 +3123,15 @@ dependencies = [
"digest",
]

[[package]]
name = "home"
version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "747309b4b440c06d57b0b25f2aee03ee9b5e5397d288c60e21fc709bb98a7408"
dependencies = [
"winapi",
]

[[package]]
name = "http"
version = "0.2.8"

@@ -3464,6 +3485,9 @@ name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
dependencies = [
"spin",
]

[[package]]
name = "lazycell"

@@ -3662,7 +3686,7 @@ version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f270b952b07995fe874b10a5ed7dd28c80aa2130e37a7de7ed667d034e0a521"
dependencies = [
"bincode 1.3.3",
"bincode",
"cactus",
"cfgrammar",
"filetime",

@@ -3904,7 +3928,7 @@ dependencies = [
"http-body",
"lazy_static",
"parking_lot",
"prost 0.11.6",
"prost",
"regex",
"serde",
"serde_json",

@@ -4027,6 +4051,7 @@ dependencies = [
"chrono",
"common-catalog",
"common-error",
"common-procedure",
"common-query",
"common-recordbatch",
"common-telemetry",

@@ -4320,6 +4345,23 @@ dependencies = [
"serde",
]

[[package]]
name = "num-bigint-dig"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2399c9463abc5f909349d8aa9ba080e0b88b3ce2885389b60b993f39b1a56905"
dependencies = [
"byteorder",
"lazy_static",
"libm",
"num-integer",
"num-iter",
"num-traits",
"rand 0.8.5",
"smallvec",
"zeroize",
]

[[package]]
name = "num-complex"
version = "0.4.3"

@@ -4479,16 +4521,15 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"

[[package]]
name = "opendal"
version = "0.25.1"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73829d3a057542556dc2c2d2b70700a44dda913cdb5483094c20ef9673ca283c"
checksum = "ef6f7b936f2f8483e19643357cb50d9ec9a49c506971ef69ca676913cf5afd91"
dependencies = [
"anyhow",
"async-compat",
"async-trait",
"backon",
"backon 0.4.0",
"base64 0.21.0",
"bincode 2.0.0-rc.2",
"bytes",
"flagset",
"futures",
@@ -4527,7 +4568,7 @@ dependencies = [
[[package]]
name = "opensrv-mysql"
version = "0.3.0"
source = "git+https://github.com/datafuselabs/opensrv?rev=b44c9d1360da297b305abf33aecfa94888e1554c#b44c9d1360da297b305abf33aecfa94888e1554c"
source = "git+https://github.com/sunng87/opensrv?branch=fix/buffer-overread#d5c24b25543ba48b69c3c4fe97f71e499819bd99"
dependencies = [
"async-trait",
"byteorder",

@@ -4794,6 +4835,15 @@ dependencies = [
"base64 0.13.1",
]

[[package]]
name = "pem-rfc7468"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac"
dependencies = [
"base64ct",
]

[[package]]
name = "percent-encoding"
version = "2.2.0"

@@ -4995,6 +5045,28 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"

[[package]]
name = "pkcs1"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eff33bdbdfc54cc98a2eca766ebdec3e1b8fb7387523d5c9c9a2891da856f719"
dependencies = [
"der",
"pkcs8",
"spki",
"zeroize",
]

[[package]]
name = "pkcs8"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba"
dependencies = [
"der",
"spki",
]

[[package]]
name = "pkg-config"
version = "0.3.26"

@@ -5274,16 +5346,6 @@ dependencies = [
"regex",
]

[[package]]
name = "prost"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001"
dependencies = [
"bytes",
"prost-derive 0.9.0",
]

[[package]]
name = "prost"
version = "0.11.6"

@@ -5291,27 +5353,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698"
dependencies = [
"bytes",
"prost-derive 0.11.6",
]

[[package]]
name = "prost-build"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5"
dependencies = [
"bytes",
"heck 0.3.3",
"itertools",
"lazy_static",
"log",
"multimap",
"petgraph",
"prost 0.9.0",
"prost-types 0.9.0",
"regex",
"tempfile",
"which",
"prost-derive",
]

[[package]]
@@ -5328,27 +5370,14 @@ dependencies = [
"multimap",
"petgraph",
"prettyplease",
"prost 0.11.6",
"prost-types 0.11.6",
"prost",
"prost-types",
"regex",
"syn",
"tempfile",
"which",
]

[[package]]
name = "prost-derive"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe"
dependencies = [
"anyhow",
"itertools",
"proc-macro2",
"quote",
"syn",
]

[[package]]
name = "prost-derive"
version = "0.11.6"

@@ -5362,16 +5391,6 @@ dependencies = [
"syn",
]

[[package]]
name = "prost-types"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a"
dependencies = [
"bytes",
"prost 0.9.0",
]

[[package]]
name = "prost-types"
version = "0.11.6"

@@ -5379,7 +5398,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788"
dependencies = [
"bytes",
"prost 0.11.6",
"prost",
]

[[package]]

@@ -5742,6 +5761,15 @@ version = "0.6.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"

[[package]]
name = "regress"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a92ff21fe8026ce3f2627faaf43606f0b67b014dbc9ccf027181a804f75d92e"
dependencies = [
"memchr",
]

[[package]]
name = "remove_dir_all"
version = "0.5.3"

@@ -5762,12 +5790,12 @@ dependencies = [
[[package]]
name = "reqsign"
version = "0.8.1"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f446438814fde3785305a59a85a6d1b361ce2c9d29e58dd87c9103a242c40b6"
checksum = "ef4d5fefeaaa1e64f4aabb79da4ea68bf6d0e7935ad927728280d2a8e95735fc"
dependencies = [
"anyhow",
"backon",
"backon 0.4.0",
"base64 0.21.0",
"bytes",
"dirs",

@@ -5780,6 +5808,8 @@ dependencies = [
"once_cell",
"percent-encoding",
"quick-xml",
"rand 0.8.5",
"rsa",
"rust-ini",
"serde",
"serde_json",

@@ -5853,6 +5883,19 @@ dependencies = [
"syn-ext",
]

[[package]]
name = "rexpect"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01ff60778f96fb5a48adbe421d21bf6578ed58c0872d712e7e08593c195adff8"
dependencies = [
"comma",
"nix 0.25.1",
"regex",
"tempfile",
"thiserror",
]

[[package]]
name = "ring"
version = "0.16.20"

@@ -5904,6 +5947,27 @@ dependencies = [
"serde",
]

[[package]]
name = "rsa"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89b3896c9b7790b70a9aa314a30e4ae114200992a19c96cbe0ca6070edd32ab8"
dependencies = [
"byteorder",
"digest",
"num-bigint-dig",
"num-integer",
"num-iter",
"num-traits",
"pkcs1",
"pkcs8",
"rand_core 0.6.4",
"sha2",
"signature",
"subtle",
"zeroize",
]

[[package]]
name = "rust-ini"
version = "0.18.0"

@@ -5962,6 +6026,19 @@ dependencies = [
"semver 1.0.16",
]

[[package]]
name = "rustfmt-wrapper"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed729e3bee08ec2befd593c27e90ca9fdd25efdc83c94c3b82eaef16e4f7406e"
dependencies = [
"serde",
"tempfile",
"thiserror",
"toml",
"toolchain_find",
]

[[package]]
name = "rustix"
version = "0.36.7"

@@ -6075,7 +6152,7 @@ name = "rustpython-compiler-core"
version = "0.1.2"
source = "git+https://github.com/discord9/RustPython?rev=2e126345#2e12634569d01674724490193eb9638f056e51ca"
dependencies = [
"bincode 1.3.3",
"bincode",
"bitflags",
"bstr",
"itertools",

@@ -6657,6 +6734,17 @@ dependencies = [
"syn",
]

[[package]]
name = "serde_tokenstream"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "274f512d6748a01e67cbcde5b4307ab2c9d52a98a2b870a980ef0793a351deff"
dependencies = [
"proc-macro2",
"serde",
"syn",
]

[[package]]
name = "serde_urlencoded"
version = "0.7.1"

@@ -6669,6 +6757,19 @@ dependencies = [
"serde",
]

[[package]]
name = "serde_yaml"
version = "0.9.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fb06d4b6cdaef0e0c51fa881acb721bed3c924cfaa71d9c94a3b771dfdf6567"
dependencies = [
"indexmap",
"itoa 1.0.5",
"ryu",
"serde",
"unsafe-libyaml",
]
[[package]]
name = "servers"
version = "0.1.0"

@@ -6684,6 +6785,7 @@ dependencies = [
"bytes",
"catalog",
"chrono",
"client",
"common-base",
"common-catalog",
"common-error",

@@ -6712,7 +6814,8 @@ dependencies = [
"pgwire",
"pin-project",
"postgres-types",
"prost 0.11.6",
"promql-parser",
"prost",
"query",
"rand 0.8.5",
"regex",

@@ -6823,6 +6926,10 @@ name = "signature"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fe458c98333f9c8152221191a77e2a44e8325d0193484af2e9421a53019e57d"
dependencies = [
"digest",
"rand_core 0.6.4",
]

[[package]]
name = "simba"

@@ -7100,7 +7207,7 @@ dependencies = [
"parquet",
"paste",
"planus",
"prost 0.11.6",
"prost",
"rand 0.8.5",
"regex",
"serde",

@@ -7262,23 +7369,29 @@ dependencies = [
"datafusion-expr",
"datatypes",
"futures",
"prost 0.9.0",
"prost",
"snafu",
"substrait 0.2.0",
"substrait 0.4.0",
"table",
"tokio",
]

[[package]]
name = "substrait"
version = "0.2.0"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46079e9004f5e069eae2976d4e23ea29c4e215b1096d3d53b76b19879f346100"
checksum = "e2feb96a6a106e21161551af32dc4e0fdab3aceb926b940d7e92a086b640fc7c"
dependencies = [
"glob",
"prost 0.9.0",
"prost-build 0.9.0",
"prost-types 0.9.0",
"heck 0.4.0",
"prost",
"prost-build",
"prost-types",
"schemars",
"serde",
"serde_json",
"serde_yaml",
"typify",
"walkdir",
]

[[package]]

@@ -7341,8 +7454,10 @@ dependencies = [
"anymap",
"async-trait",
"chrono",
"common-base",
"common-catalog",
"common-error",
"common-procedure",
"common-query",
"common-recordbatch",
"common-telemetry",

@@ -7353,10 +7468,13 @@ dependencies = [
"datatypes",
"derive_builder 0.11.2",
"futures",
"humantime",
"humantime-serde",
"parquet",
"parquet-format-async-temp",
"paste",
"serde",
"serde_json",
"snafu",
"store-api",
"tempdir",

@@ -7364,6 +7482,28 @@ dependencies = [
"tokio-util",
]

[[package]]
name = "table-procedure"
version = "0.1.0"
dependencies = [
"async-trait",
"catalog",
"common-error",
"common-procedure",
"common-telemetry",
"datatypes",
"log-store",
"mito",
"object-store",
"serde",
"serde_json",
"snafu",
"storage",
"table",
"tempdir",
"tokio",
]

[[package]]
name = "tagptr"
version = "0.2.0"
@@ -7812,8 +7952,8 @@ dependencies = [
"hyper-timeout",
"percent-encoding",
"pin-project",
"prost 0.11.6",
"prost-derive 0.11.6",
"prost",
"prost-derive",
"rustls-pemfile",
"tokio",
"tokio-rustls",

@@ -7834,11 +7974,24 @@ checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4"
dependencies = [
"prettyplease",
"proc-macro2",
"prost-build 0.11.3",
"prost-build",
"quote",
"syn",
]

[[package]]
name = "toolchain_find"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e85654a10e7a07a47c6f19d93818f3f343e22927f2fa280c84f7c8042743413"
dependencies = [
"home",
"lazy_static",
"regex",
"semver 0.11.0",
"walkdir",
]

[[package]]
name = "tower"
version = "0.4.13"

@@ -8058,6 +8211,51 @@ version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"

[[package]]
name = "typify"
version = "0.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e8486352f3c946e69f983558cfc09b295250b01e01b381ec67a05a812d01d63"
dependencies = [
"typify-impl",
"typify-macro",
]

[[package]]
name = "typify-impl"
version = "0.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7624d0b911df6e2bbf34a236f76281f93b294cdde1d4df1dbdb748e5a7fefa5"
dependencies = [
"heck 0.4.0",
"log",
"proc-macro2",
"quote",
"regress",
"rustfmt-wrapper",
"schemars",
"serde_json",
"syn",
"thiserror",
"unicode-ident",
]

[[package]]
name = "typify-macro"
version = "0.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c42802aa033cee7650a4e1509ba7d5848a56f84be7c4b31e4385ee12445e942"
dependencies = [
"proc-macro2",
"quote",
"schemars",
"serde",
"serde_json",
"serde_tokenstream",
"syn",
"typify-impl",
]

[[package]]
name = "ucd-trie"
version = "0.1.5"

@@ -8258,6 +8456,12 @@ version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "029df4cc8238cefc911704ff8fa210853a0f3bce2694d8f51181dd41ee0f3301"

[[package]]
name = "unsafe-libyaml"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc7ed8ba44ca06be78ea1ad2c3682a43349126c8818054231ee6f4748012aed2"

[[package]]
name = "untrusted"
version = "0.7.1"

@@ -8360,12 +8564,6 @@ version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"

[[package]]
name = "virtue"
version = "0.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b60dcd6a64dd45abf9bd426970c9843726da7fc08f44cd6fcebf68c21220a63"

[[package]]
name = "vob"
version = "3.0.2"

@@ -8748,6 +8946,12 @@ dependencies = [
"lzma-sys",
]

[[package]]
name = "zeroize"
version = "1.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f"

[[package]]
name = "zstd"
version = "0.12.2+zstd.1.5.2"
Cargo.toml

@@ -37,6 +37,7 @@ members = [
"src/storage",
"src/store-api",
"src/table",
"src/table-procedure",
"tests-integration",
"tests/runner",
]

@@ -54,7 +55,7 @@ arrow-schema = { version = "29.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
# TODO(LFC): Use released Datafusion when it officially dpendent on Arrow 29.0
# TODO(LFC): Use released Datafusion when it officially dependent on Arrow 29.0
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
4 Makefile

@@ -19,6 +19,10 @@ clean: ## Clean the project.
fmt: ## Format all the Rust code.
cargo fmt --all

.PHONY: fmt-toml
fmt-toml: ## Format all TOML files.
taplo format --check --option "indent_string= "

.PHONY: docker-image
docker-image: ## Build docker image.
docker build --network host -f docker/Dockerfile -t ${IMAGE_REGISTRY}:${IMAGE_TAG} .
@@ -27,7 +27,7 @@ use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
use client::{Client, Database};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;

@@ -422,7 +422,7 @@ fn main() {
.unwrap()
.block_on(async {
let client = Client::with_urls(vec![&args.endpoint]);
let db = Database::with_client(client);
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);

if !args.skip_write {
do_write(&args, &db).await;
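With this change a `Database` handle is constructed with an explicit catalog and schema instead of the removed `Database::with_client`. A minimal sketch of the new call site, assuming the `client` crate re-exports introduced later in this diff (the endpoint address is a placeholder):

```rust
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

fn connect(endpoint: &str) -> Database {
    // One Client may serve several Database handles; catalog and schema
    // are now explicit constructor arguments instead of implicit defaults.
    let client = Client::with_urls(vec![endpoint]);
    Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client)
}
```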
@@ -8,3 +8,5 @@ coverage:
ignore:
- "**/error*.rs" # ignore all error.rs files
- "tests/runner/*.rs" # ignore integration test runner
comment: # this is a top-level key
layout: "diff"
@@ -19,7 +19,7 @@ sync_write = false
type = 'File'
data_dir = '/tmp/greptimedb/data/'

[meta_client_opts]
[meta_client_options]
metasrv_addrs = ['127.0.0.1:3002']
timeout_millis = 3000
connect_timeout_millis = 5000

@@ -29,3 +29,7 @@ tcp_nodelay = false
max_inflight_tasks = 4
max_files_in_level0 = 16
max_purge_tasks = 32

[procedure.store]
type = 'File'
data_dir = '/tmp/greptimedb/procedure/'
@@ -5,7 +5,7 @@ datanode_rpc_addr = '127.0.0.1:3001'
addr = '127.0.0.1:4000'
timeout = "30s"

[meta_client_opts]
[meta_client_options]
metasrv_addrs = ['127.0.0.1:3002']
timeout_millis = 3000
connect_timeout_millis = 5000
@@ -14,7 +14,6 @@ purge_threshold = '50GB'
read_batch_size = 128
sync_write = false


[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'

@@ -42,3 +41,7 @@ enable = true
addr = '127.0.0.1:4003'
runtime_size = 2
check_pwd = false

[procedure.store]
type = 'File'
data_dir = '/tmp/greptimedb/procedure/'
@@ -149,10 +149,10 @@ inputs:
- title: 'Series Normalize: \noffset = 0'
operator: prom
inputs:
- title: 'Filter: \ntimetamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
- title: 'Filter: \ntimestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
operator: filter
inputs:
- title: 'Table Scan: \ntable = request_duration, timetamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
- title: 'Table Scan: \ntable = request_duration, timestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
operator: scan -->


@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3e6349be127b65a8b42a38cda9d527ec423ca77d" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1599ae2a0d1d8f42ee23ed26e4ad7a7b34134c60" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true
@@ -97,7 +97,9 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
TimestampType::Microsecond(_) => ColumnDataType::TimestampMicrosecond,
TimestampType::Nanosecond(_) => ColumnDataType::TimestampNanosecond,
},
ConcreteDataType::Null(_) | ConcreteDataType::List(_) => {
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_) => {
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
}
});
@@ -24,10 +24,10 @@ use serde::{Deserialize, Serialize, Serializer};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId, TableVersion};

const CATALOG_KEY_PREFIX: &str = "__c";
const SCHEMA_KEY_PREFIX: &str = "__s";
const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";
pub const CATALOG_KEY_PREFIX: &str = "__c";
pub const SCHEMA_KEY_PREFIX: &str = "__s";
pub const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
pub const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";

const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";
@@ -370,4 +370,10 @@ mod tests {
let deserialized = TableGlobalValue::parse(serialized).unwrap();
assert_eq!(value, deserialized);
}

#[test]
fn test_table_global_value_compatibility() {
let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
TableGlobalValue::parse(s).unwrap();
}
}
@@ -32,7 +32,9 @@ use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::{TableId, TableInfoRef};
use table::requests::{CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest};
use table::requests::{
CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest, TableOptions,
};
use table::{Table, TableRef};

use crate::error::{

@@ -109,7 +111,7 @@ impl SystemCatalogTable {
region_numbers: vec![0],
primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX],
create_if_not_exists: true,
table_options: HashMap::new(),
table_options: TableOptions::default(),
};

let table = engine

@@ -397,7 +399,7 @@ mod tests {
use log_store::NoopLogStore;
use mito::config::EngineConfig;
use mito::engine::MitoEngine;
use object_store::ObjectStore;
use object_store::{ObjectStore, ObjectStoreBuilder};
use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;

@@ -480,11 +482,11 @@ mod tests {
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
let dir = TempDir::new("system-table-test").unwrap();
let store_dir = dir.path().to_string_lossy();
let accessor = object_store::backend::fs::Builder::default()
let accessor = object_store::services::Fs::default()
.root(&store_dir)
.build()
.unwrap();
let object_store = ObjectStore::new(accessor);
let object_store = ObjectStore::new(accessor).finish();
let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let table_engine = Arc::new(MitoEngine::new(
EngineConfig::default(),
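The test setup above tracks the OpenDAL upgrade (0.25 to 0.27): filesystem backends moved from `object_store::backend::fs` to `object_store::services`, and an operator must now be finalized with `.finish()`. A minimal sketch of the new construction, assuming the `object-store` wrapper crate's API exactly as it appears in this hunk:

```rust
use object_store::{ObjectStore, ObjectStoreBuilder};

fn build_fs_object_store(root: &str) -> ObjectStore {
    // services::Fs replaces the old backend::fs::Builder.
    let accessor = object_store::services::Fs::default()
        .root(root)
        .build()
        .unwrap();
    // .finish() is now required to turn the accessor into a usable operator.
    ObjectStore::new(accessor).finish()
}
```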
@@ -147,6 +147,7 @@ impl TableEngine for MockTableEngine {
let table_id = TableId::from_str(
request
.table_options
.extra_options
.get("table_id")
.unwrap_or(&default_table_id),
)
@@ -32,12 +32,8 @@ substrait = { path = "../common/substrait" }
tokio.workspace = true
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

# TODO(ruihang): upgrade to 0.11 once substrait-rs supports it.
[dev-dependencies.prost_09]
package = "prost"
version = "0.9"
prost.workspace = true

[dev-dependencies.substrait_proto]
package = "substrait"
version = "0.2"
version = "0.4"
@@ -14,11 +14,12 @@

use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, TableId};
use client::{Client, Database};
use prost_09::Message;
use substrait_proto::protobuf::plan_rel::RelType as PlanRelType;
use substrait_proto::protobuf::read_rel::{NamedTable, ReadType};
use substrait_proto::protobuf::rel::RelType;
use substrait_proto::protobuf::{PlanRel, ReadRel, Rel};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use prost::Message;
use substrait_proto::proto::plan_rel::RelType as PlanRelType;
use substrait_proto::proto::read_rel::{NamedTable, ReadType};
use substrait_proto::proto::rel::RelType;
use substrait_proto::proto::{PlanRel, ReadRel, Rel};
use tracing::{event, Level};

fn main() {

@@ -65,7 +66,7 @@ async fn run() {
region_ids: vec![0],
};

let db = Database::with_client(client);
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
let result = db.create(create_table_expr).await.unwrap();
event!(Level::INFO, "create table result: {:#?}", result);

@@ -88,12 +89,8 @@ fn mock_logical_plan() -> Vec<u8> {
let read_type = ReadType::NamedTable(named_table);

let read_rel = ReadRel {
common: None,
base_schema: None,
filter: None,
projection: None,
advanced_extension: None,
read_type: Some(read_type),
..Default::default()
};

let mut buf = vec![];
@@ -14,15 +14,15 @@

use std::str::FromStr;

use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
AlterExpr, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest, InsertRequest,
QueryRequest, RequestHeader,
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, GreptimeRequest,
InsertRequest, QueryRequest, RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::*;
use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
use common_query::Output;

@@ -42,6 +42,7 @@ pub struct Database {
schema: String,

client: Client,
ctx: FlightContext,
}

impl Database {

@@ -50,17 +51,24 @@ impl Database {
catalog: catalog.into(),
schema: schema.into(),
client,
ctx: FlightContext::default(),
}
}

pub fn with_client(client: Client) -> Self {
Self::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client)
pub fn set_catalog(&mut self, catalog: impl Into<String>) {
self.catalog = catalog.into();
}

pub fn set_schema(&mut self, schema: impl Into<String>) {
self.schema = schema.into();
}

pub fn set_auth(&mut self, auth: AuthScheme) {
self.ctx.auth_header = Some(AuthHeader {
auth_scheme: Some(auth),
});
}

pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
self.do_get(Request::Insert(request)).await
}

@@ -105,6 +113,7 @@ impl Database {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
}),
request: Some(request),
};

@@ -164,12 +173,18 @@ fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

#[derive(Default, Debug, Clone)]
pub struct FlightContext {
auth_header: Option<AuthHeader>,
}

#[cfg(test)]
mod tests {
use std::sync::Arc;

use api::helper::ColumnDataTypeWrapper;
use api::v1::Column;
use api::v1::auth_header::AuthScheme;
use api::v1::{AuthHeader, Basic, Column};
use common_grpc::select::{null_mask, values};
use common_grpc_expr::column_to_vector;
use datatypes::prelude::{Vector, VectorRef};

@@ -179,6 +194,8 @@ mod tests {
UInt32Vector, UInt64Vector, UInt8Vector,
};

use crate::database::FlightContext;

#[test]
fn test_column_to_vector() {
let mut column = create_test_column(Arc::new(BooleanVector::from(vec![true])));

@@ -262,4 +279,26 @@ mod tests {
datatype: wrapper.datatype() as i32,
}
}

#[test]
fn test_flight_ctx() {
let mut ctx = FlightContext::default();
assert!(ctx.auth_header.is_none());

let basic = AuthScheme::Basic(Basic {
username: "u".to_string(),
password: "p".to_string(),
});

ctx.auth_header = Some(AuthHeader {
auth_scheme: Some(basic),
});

assert!(matches!(
ctx.auth_header,
Some(AuthHeader {
auth_scheme: Some(AuthScheme::Basic(_)),
})
))
}
}
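Taken together, the new setters let a caller re-target an existing handle and attach gRPC authentication that is carried in the `FlightContext` and cloned into every `RequestHeader`. A minimal sketch, assuming the API shown above (the credentials are placeholders):

```rust
use api::v1::auth_header::AuthScheme;
use api::v1::Basic;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

fn authenticated_handle(addr: &str) -> Database {
    let client = Client::with_urls(vec![addr]);
    let mut db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
    // Switch the schema without rebuilding the handle.
    db.set_schema("public");
    // Basic auth travels with every subsequent request header.
    db.set_auth(AuthScheme::Basic(Basic {
        username: "u".to_string(),
        password: "p".to_string(),
    }));
    db
}
```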
@@ -18,6 +18,7 @@ mod error;
pub mod load_balance;

pub use api;
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

pub use self::client::Client;
pub use self::database::Database;
@@ -12,16 +12,22 @@ path = "src/bin/greptime.rs"
[dependencies]
anymap = "1.0.0-beta.2"
clap = { version = "3.1", features = ["derive"] }
client = { path = "../client" }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry", features = [
"deadlock_detection",
] }
datanode = { path = "../datanode" }
either = "1.8"
frontend = { path = "../frontend" }
futures.workspace = true
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv" }
nu-ansi-term = "0.46"
rustyline = "10.1"
serde.workspace = true
servers = { path = "../servers" }
snafu.workspace = true

@@ -29,6 +35,7 @@ tokio.workspace = true
toml = "0.5"

[dev-dependencies]
rexpect = "0.5"
serde.workspace = true
tempdir = "0.3"
@@ -16,7 +16,7 @@ use std::fmt;

use clap::Parser;
use cmd::error::Result;
use cmd::{datanode, frontend, metasrv, standalone};
use cmd::{cli, datanode, frontend, metasrv, standalone};
use common_telemetry::logging::{error, info};

#[derive(Parser)]

@@ -46,6 +46,8 @@ enum SubCommand {
Metasrv(metasrv::Command),
#[clap(name = "standalone")]
Standalone(standalone::Command),
#[clap(name = "cli")]
Cli(cli::Command),
}

impl SubCommand {

@@ -55,6 +57,7 @@ impl SubCommand {
SubCommand::Frontend(cmd) => cmd.run().await,
SubCommand::Metasrv(cmd) => cmd.run().await,
SubCommand::Standalone(cmd) => cmd.run().await,
SubCommand::Cli(cmd) => cmd.run().await,
}
}
}

@@ -66,6 +69,7 @@ impl fmt::Display for SubCommand {
SubCommand::Frontend(..) => write!(f, "greptime-frontend"),
SubCommand::Metasrv(..) => write!(f, "greptime-metasrv"),
SubCommand::Standalone(..) => write!(f, "greptime-standalone"),
SubCommand::Cli(_) => write!(f, "greptime-cli"),
}
}
}
62 src/cmd/src/cli.rs (new file)

@@ -0,0 +1,62 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod cmd;
mod helper;
mod repl;

use clap::Parser;
use repl::Repl;

use crate::error::Result;

#[derive(Parser)]
pub struct Command {
    #[clap(subcommand)]
    cmd: SubCommand,
}

impl Command {
    pub async fn run(self) -> Result<()> {
        self.cmd.run().await
    }
}

#[derive(Parser)]
enum SubCommand {
    Attach(AttachCommand),
}

impl SubCommand {
    async fn run(self) -> Result<()> {
        match self {
            SubCommand::Attach(cmd) => cmd.run().await,
        }
    }
}

#[derive(Debug, Parser)]
pub(crate) struct AttachCommand {
    #[clap(long)]
    pub(crate) grpc_addr: String,
    #[clap(long, action)]
    pub(crate) disable_helper: bool,
}

impl AttachCommand {
    async fn run(self) -> Result<()> {
        let mut repl = Repl::try_new(&self)?;
        repl.run().await
    }
}
154 src/cmd/src/cli/cmd.rs (new file)

@@ -0,0 +1,154 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, InvalidReplCommandSnafu, Result};

/// Represents the parsed command from the user (which may be over many lines)
#[derive(Debug, PartialEq)]
pub(crate) enum ReplCommand {
    Help,
    UseDatabase { db_name: String },
    Sql { sql: String },
    Exit,
}

impl TryFrom<&str> for ReplCommand {
    type Error = Error;

    fn try_from(input: &str) -> Result<Self> {
        let input = input.trim();
        if input.is_empty() {
            return InvalidReplCommandSnafu {
                reason: "No command specified".to_string(),
            }
            .fail();
        }

        // If line ends with ';', it must be treated as a complete input.
        // However, the opposite is not true.
        let input_is_completed = input.ends_with(';');

        let input = input.strip_suffix(';').map(|x| x.trim()).unwrap_or(input);
        let lowercase = input.to_lowercase();
        match lowercase.as_str() {
            "help" => Ok(Self::Help),
            "exit" | "quit" => Ok(Self::Exit),
            _ => match input.split_once(' ') {
                Some((maybe_use, database)) if maybe_use.to_lowercase() == "use" => {
                    Ok(Self::UseDatabase {
                        db_name: database.trim().to_string(),
                    })
                }
                // Any valid SQL must contains at least one whitespace.
                Some(_) if input_is_completed => Ok(Self::Sql {
                    sql: input.to_string(),
                }),
                _ => InvalidReplCommandSnafu {
                    reason: format!("unknown command '{input}', maybe input is not completed"),
                }
                .fail(),
            },
        }
    }
}

impl ReplCommand {
    pub fn help() -> &'static str {
        r#"
Available commands (case insensitive):
- 'help': print this help
- 'exit' or 'quit': exit the REPL
- 'use <your database name>': switch to another database/schema context
- Other typed in text will be treated as SQL.
  You can enter new line while typing, just remember to end it with ';'.
"#
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::error::Error::InvalidReplCommand;

    #[test]
    fn test_from_str() {
        fn test_ok(s: &str, expected: ReplCommand) {
            let actual: ReplCommand = s.try_into().unwrap();
            assert_eq!(expected, actual, "'{}'", s);
        }

        fn test_err(s: &str) {
            let result: Result<ReplCommand> = s.try_into();
            assert!(matches!(result, Err(InvalidReplCommand { .. })))
        }

        test_err("");
        test_err(" ");
        test_err("\t");

        test_ok("help", ReplCommand::Help);
        test_ok("help", ReplCommand::Help);
        test_ok(" help", ReplCommand::Help);
        test_ok(" help ", ReplCommand::Help);
        test_ok(" HELP ", ReplCommand::Help);
        test_ok(" Help; ", ReplCommand::Help);
        test_ok(" help ; ", ReplCommand::Help);

        test_ok("exit", ReplCommand::Exit);
        test_ok("exit;", ReplCommand::Exit);
        test_ok("exit ;", ReplCommand::Exit);
        test_ok("EXIT", ReplCommand::Exit);

        test_ok("quit", ReplCommand::Exit);
        test_ok("quit;", ReplCommand::Exit);
        test_ok("quit ;", ReplCommand::Exit);
        test_ok("QUIT", ReplCommand::Exit);

        test_ok(
            "use Foo",
            ReplCommand::UseDatabase {
                db_name: "Foo".to_string(),
            },
        );
        test_ok(
            " use Foo ; ",
            ReplCommand::UseDatabase {
                db_name: "Foo".to_string(),
            },
        );
        // ensure that database name is case sensitive
        test_ok(
            " use FOO ; ",
            ReplCommand::UseDatabase {
                db_name: "FOO".to_string(),
            },
        );

        // ensure that we aren't messing with capitalization
        test_ok(
            "SELECT * from foo;",
            ReplCommand::Sql {
                sql: "SELECT * from foo".to_string(),
            },
        );
        // Input line (that don't belong to any other cases above) must ends with ';' to make it a valid SQL.
        test_err("insert blah");
        test_ok(
            "insert blah;",
            ReplCommand::Sql {
                sql: "insert blah".to_string(),
            },
        );
    }
}
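The same `TryFrom<&str>` implementation exercised by these tests is what the REPL's line validator calls. A small illustrative driver (hypothetical, for exposition only):

```rust
// Sketch: dispatch a raw input line through the ReplCommand parser above.
fn classify(line: &str) {
    match ReplCommand::try_from(line) {
        Ok(ReplCommand::Help) => println!("{}", ReplCommand::help()),
        Ok(ReplCommand::Exit) => println!("bye"),
        Ok(ReplCommand::UseDatabase { db_name }) => println!("use {db_name}"),
        Ok(ReplCommand::Sql { sql }) => println!("run SQL: {sql}"),
        // Lines without a trailing ';' come back as errors and are treated as incomplete.
        Err(e) => println!("incomplete or invalid: {e}"),
    }
}
```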
112
src/cmd/src/cli/helper.rs
Normal file
112
src/cmd/src/cli/helper.rs
Normal file
@@ -0,0 +1,112 @@
|
||||
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::borrow::Cow;

use rustyline::completion::Completer;
use rustyline::highlight::{Highlighter, MatchingBracketHighlighter};
use rustyline::hint::{Hinter, HistoryHinter};
use rustyline::validate::{ValidationContext, ValidationResult, Validator};

use crate::cli::cmd::ReplCommand;

pub(crate) struct RustylineHelper {
    hinter: HistoryHinter,
    highlighter: MatchingBracketHighlighter,
}

impl Default for RustylineHelper {
    fn default() -> Self {
        Self {
            hinter: HistoryHinter {},
            highlighter: MatchingBracketHighlighter::default(),
        }
    }
}

impl rustyline::Helper for RustylineHelper {}

impl Validator for RustylineHelper {
    fn validate(&self, ctx: &mut ValidationContext<'_>) -> rustyline::Result<ValidationResult> {
        let input = ctx.input();
        match ReplCommand::try_from(input) {
            Ok(_) => Ok(ValidationResult::Valid(None)),
            Err(e) => {
                if input.trim_end().ends_with(';') {
                    // If the line ends with ';', it HAS to be a valid command.
                    Ok(ValidationResult::Invalid(Some(e.to_string())))
                } else {
                    Ok(ValidationResult::Incomplete)
                }
            }
        }
    }
}

impl Hinter for RustylineHelper {
    type Hint = String;

    fn hint(&self, line: &str, pos: usize, ctx: &rustyline::Context<'_>) -> Option<Self::Hint> {
        self.hinter.hint(line, pos, ctx)
    }
}

impl Highlighter for RustylineHelper {
    fn highlight<'l>(&self, line: &'l str, pos: usize) -> Cow<'l, str> {
        self.highlighter.highlight(line, pos)
    }

    fn highlight_prompt<'b, 's: 'b, 'p: 'b>(
        &'s self,
        prompt: &'p str,
        default: bool,
    ) -> Cow<'b, str> {
        self.highlighter.highlight_prompt(prompt, default)
    }

    fn highlight_hint<'h>(&self, hint: &'h str) -> Cow<'h, str> {
        use nu_ansi_term::Style;
        Cow::Owned(Style::new().dimmed().paint(hint).to_string())
    }

    fn highlight_candidate<'c>(
        &self,
        candidate: &'c str,
        completion: rustyline::CompletionType,
    ) -> Cow<'c, str> {
        self.highlighter.highlight_candidate(candidate, completion)
    }

    fn highlight_char(&self, line: &str, pos: usize) -> bool {
        self.highlighter.highlight_char(line, pos)
    }
}

impl Completer for RustylineHelper {
    type Candidate = String;

    fn complete(
        &self,
        line: &str,
        pos: usize,
        ctx: &rustyline::Context<'_>,
    ) -> rustyline::Result<(usize, Vec<Self::Candidate>)> {
        // If there is a hint, use that as the auto-complete when the user hits `tab`.
        if let Some(hint) = self.hinter.hint(line, pos, ctx) {
            Ok((pos, vec![hint]))
        } else {
            Ok((0, vec![]))
        }
    }
}
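
A minimal usage sketch (not part of the commit; it assumes the same rustyline version the crate builds against) of wiring this helper into an Editor:

// Hypothetical wiring example for RustylineHelper.
fn attach_helper() -> rustyline::Result<()> {
    let mut rl: rustyline::Editor<RustylineHelper> = rustyline::Editor::new()?;
    rl.set_helper(Some(RustylineHelper::default()));
    // With the helper installed, readline() validates, hints and highlights input.
    let line = rl.readline("> ")?;
    println!("read: {line}");
    Ok(())
}
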
src/cmd/src/cli/repl.rs (new file, 199 lines)
@@ -0,0 +1,199 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::path::PathBuf;
use std::time::Instant;

use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::ErrorExt;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging;
use either::Either;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use snafu::{ErrorCompat, ResultExt};

use crate::cli::cmd::ReplCommand;
use crate::cli::helper::RustylineHelper;
use crate::cli::AttachCommand;
use crate::error::{
    CollectRecordBatchesSnafu, PrettyPrintRecordBatchesSnafu, ReadlineSnafu, ReplCreationSnafu,
    RequestDatabaseSnafu, Result,
};

/// Captures the state of the repl, gathers commands and executes them one by one
pub(crate) struct Repl {
    /// Rustyline editor for interacting with user on command line
    rl: Editor<RustylineHelper>,

    /// Current prompt
    prompt: String,

    /// Client for interacting with GreptimeDB
    database: Database,
}

#[allow(clippy::print_stdout)]
impl Repl {
    fn print_help(&self) {
        println!("{}", ReplCommand::help())
    }

    pub(crate) fn try_new(cmd: &AttachCommand) -> Result<Self> {
        let mut rl = Editor::new().context(ReplCreationSnafu)?;

        if !cmd.disable_helper {
            rl.set_helper(Some(RustylineHelper::default()));

            let history_file = history_file();
            if let Err(e) = rl.load_history(&history_file) {
                logging::debug!(
                    "failed to load history file on {}, error: {e}",
                    history_file.display()
                );
            }
        }

        let client = Client::with_urls([&cmd.grpc_addr]);
        let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);

        Ok(Self {
            rl,
            prompt: "> ".to_string(),
            database,
        })
    }

    /// Parse the next command
    fn next_command(&mut self) -> Result<ReplCommand> {
        match self.rl.readline(&self.prompt) {
            Ok(ref line) => {
                let request = line.trim();

                self.rl.add_history_entry(request.to_string());

                request.try_into()
            }
            Err(ReadlineError::Eof) | Err(ReadlineError::Interrupted) => Ok(ReplCommand::Exit),
            // Some sort of real underlying error
            Err(e) => Err(e).context(ReadlineSnafu),
        }
    }

    /// Read Evaluate Print Loop (interactive command line) for GreptimeDB
    ///
    /// Inspired / based on repl.rs from InfluxDB IOX
    pub(crate) async fn run(&mut self) -> Result<()> {
        println!("Ready for commands. (Hint: try 'help')");

        loop {
            match self.next_command()? {
                ReplCommand::Help => {
                    self.print_help();
                }
                ReplCommand::UseDatabase { db_name } => {
                    if self.execute_sql(format!("USE {db_name}")).await {
                        println!("Using {db_name}");
                        self.database.set_schema(&db_name);
                        self.prompt = format!("[{db_name}] > ");
                    }
                }
                ReplCommand::Sql { sql } => {
                    self.execute_sql(sql).await;
                }
                ReplCommand::Exit => {
                    return Ok(());
                }
            }
        }
    }

    async fn execute_sql(&self, sql: String) -> bool {
        self.do_execute_sql(sql)
            .await
            .map_err(|e| {
                let status_code = e.status_code();
                let root_cause = e.iter_chain().last().unwrap();
                println!("Error: {}({status_code}), {root_cause}", status_code as u32)
            })
            .is_ok()
    }

    async fn do_execute_sql(&self, sql: String) -> Result<()> {
        let start = Instant::now();

        let output = self
            .database
            .sql(&sql)
            .await
            .context(RequestDatabaseSnafu { sql: &sql })?;

        let either = match output {
            Output::Stream(s) => {
                let x = RecordBatches::try_collect(s)
                    .await
                    .context(CollectRecordBatchesSnafu)?;
                Either::Left(x)
            }
            Output::RecordBatches(x) => Either::Left(x),
            Output::AffectedRows(rows) => Either::Right(rows),
        };

        let end = Instant::now();

        match either {
            Either::Left(recordbatches) => {
                let total_rows: usize = recordbatches.iter().map(|x| x.num_rows()).sum();
                if total_rows > 0 {
                    println!(
                        "{}",
                        recordbatches
                            .pretty_print()
                            .context(PrettyPrintRecordBatchesSnafu)?
                    );
                }
                println!("Total Rows: {total_rows}")
            }
            Either::Right(rows) => println!("Affected Rows: {rows}"),
        };

        println!("Cost {} ms", (end - start).as_millis());
        Ok(())
    }
}

impl Drop for Repl {
    fn drop(&mut self) {
        if self.rl.helper().is_some() {
            let history_file = history_file();
            if let Err(e) = self.rl.save_history(&history_file) {
                logging::debug!(
                    "failed to save history file on {}, error: {e}",
                    history_file.display()
                );
            }
        }
    }
}

/// Return the location of the history file (defaults to $HOME/".greptimedb_cli_history")
fn history_file() -> PathBuf {
    let mut buf = match std::env::var("HOME") {
        Ok(home) => PathBuf::from(home),
        Err(_) => PathBuf::new(),
    };
    buf.push(".greptimedb_cli_history");
    buf
}
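
A reduced, self-contained sketch (illustrative types, not the commit's) of the Either pattern do_execute_sql uses to collapse the two output shapes into one value before printing:

use either::Either;

fn render(output: Either<Vec<String>, usize>) {
    match output {
        // Query results: print a row count.
        Either::Left(rows) => println!("Total Rows: {}", rows.len()),
        // DML/DDL results: print the affected-row count.
        Either::Right(affected) => println!("Affected Rows: {affected}"),
    }
}

fn main() {
    render(Either::Left(vec!["hello".to_string()]));
    render(Either::Right(1));
}
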
@@ -14,8 +14,10 @@
 use clap::Parser;
 use common_telemetry::logging;
-use datanode::datanode::{Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig};
-use meta_client::MetaClientOpts;
+use datanode::datanode::{
+    Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig,
+};
+use meta_client::MetaClientOptions;
 use servers::Mode;
 use snafu::ResultExt;

@@ -65,6 +67,8 @@ struct StartCommand {
     data_dir: Option<String>,
     #[clap(long)]
     wal_dir: Option<String>,
+    #[clap(long)]
+    procedure_dir: Option<String>,
 }

 impl StartCommand {
@@ -110,8 +114,8 @@ impl TryFrom<StartCommand> for DatanodeOptions {
         }

         if let Some(meta_addr) = cmd.metasrv_addr {
-            opts.meta_client_opts
-                .get_or_insert_with(MetaClientOpts::default)
+            opts.meta_client_options
+                .get_or_insert_with(MetaClientOptions::default)
                 .metasrv_addrs = meta_addr
                 .split(',')
                 .map(&str::trim)
@@ -134,6 +138,11 @@ impl TryFrom<StartCommand> for DatanodeOptions {
         if let Some(wal_dir) = cmd.wal_dir {
            opts.wal.dir = wal_dir;
         }
+
+        if let Some(procedure_dir) = cmd.procedure_dir {
+            opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
+        }

         Ok(opts)
     }
 }
@@ -162,12 +171,12 @@ mod tests {
         assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal.dir);
         assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
         assert_eq!(4, options.mysql_runtime_size);
-        let MetaClientOpts {
+        let MetaClientOptions {
             metasrv_addrs: metasrv_addr,
             timeout_millis,
             connect_timeout_millis,
             tcp_nodelay,
-        } = options.meta_client_opts.unwrap();
+        } = options.meta_client_options.unwrap();

         assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
         assert_eq!(5000, connect_timeout_millis);
@@ -240,12 +249,12 @@ mod tests {
         assert_eq!(1024 * 1024 * 1024 * 50, dn_opts.wal.purge_threshold.0);
         assert!(!dn_opts.wal.sync_write);
         assert_eq!(Some(42), dn_opts.node_id);
-        let MetaClientOpts {
+        let MetaClientOptions {
             metasrv_addrs: metasrv_addr,
             timeout_millis,
             connect_timeout_millis,
             tcp_nodelay,
-        } = dn_opts.meta_client_opts.unwrap();
+        } = dn_opts.meta_client_options.unwrap();
         assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
         assert_eq!(3000, timeout_millis);
         assert_eq!(5000, connect_timeout_millis);
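
A standalone sketch (illustrative struct, not the crate's) of the Option::get_or_insert_with idiom used above: the default options are created lazily, only when a --metasrv-addr override actually needs to write into them:

#[derive(Debug, Default)]
struct ClientOptions {
    addrs: Vec<String>,
}

fn main() {
    let mut opts: Option<ClientOptions> = None;
    opts.get_or_insert_with(ClientOptions::default).addrs = "127.0.0.1:3002, 127.0.0.1:3003"
        .split(',')
        .map(str::trim)
        .map(str::to_string)
        .collect();
    println!("{opts:?}"); // Some(ClientOptions { addrs: [...] })
}
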
@@ -15,6 +15,7 @@
 use std::any::Any;

 use common_error::prelude::*;
+use rustyline::error::ReadlineError;

 #[derive(Debug, Snafu)]
 #[snafu(visibility(pub))]
@@ -68,6 +69,40 @@ pub enum Error {
         #[snafu(backtrace)]
         source: meta_srv::error::Error,
     },
+
+    #[snafu(display("Invalid REPL command: {reason}"))]
+    InvalidReplCommand { reason: String },
+
+    #[snafu(display("Cannot create REPL: {}", source))]
+    ReplCreation {
+        source: ReadlineError,
+        backtrace: Backtrace,
+    },
+
+    #[snafu(display("Error reading command: {}", source))]
+    Readline {
+        source: ReadlineError,
+        backtrace: Backtrace,
+    },
+
+    #[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
+    RequestDatabase {
+        sql: String,
+        #[snafu(backtrace)]
+        source: client::Error,
+    },
+
+    #[snafu(display("Failed to collect RecordBatches, source: {source}"))]
+    CollectRecordBatches {
+        #[snafu(backtrace)]
+        source: common_recordbatch::error::Error,
+    },
+
+    #[snafu(display("Failed to pretty print RecordBatches, source: {source}"))]
+    PrettyPrintRecordBatches {
+        #[snafu(backtrace)]
+        source: common_recordbatch::error::Error,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -82,8 +117,15 @@ impl ErrorExt for Error {
             Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
                 StatusCode::InvalidArguments
             }
-            Error::IllegalConfig { .. } => StatusCode::InvalidArguments,
+            Error::IllegalConfig { .. } | Error::InvalidReplCommand { .. } => {
+                StatusCode::InvalidArguments
+            }
             Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
+            Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
+            Error::RequestDatabase { source, .. } => source.status_code(),
+            Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => {
+                source.status_code()
+            }
         }
     }
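
A hedged sketch (selector and source type are illustrative, not copied from the codebase) of how a snafu context selector such as ReadlineSnafu lifts a library error into the crate's Error while capturing a backtrace:

use snafu::{Backtrace, ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Error reading command: {}", source))]
    Readline {
        source: std::io::Error,
        backtrace: Backtrace,
    },
}

fn read_command() -> Result<String, Error> {
    // Any Result<_, std::io::Error> can be converted via the generated selector.
    std::fs::read_to_string("command.txt").context(ReadlineSnafu)
}
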
@@ -23,7 +23,7 @@ use frontend::instance::Instance;
 use frontend::mysql::MysqlOptions;
 use frontend::opentsdb::OpentsdbOptions;
 use frontend::postgres::PostgresOptions;
-use meta_client::MetaClientOpts;
+use meta_client::MetaClientOptions;
 use servers::auth::UserProviderRef;
 use servers::http::HttpOptions;
 use servers::tls::{TlsMode, TlsOption};
@@ -158,8 +158,8 @@ impl TryFrom<StartCommand> for FrontendOptions {
             opts.influxdb_options = Some(InfluxdbOptions { enable });
         }
         if let Some(metasrv_addr) = cmd.metasrv_addr {
-            opts.meta_client_opts
-                .get_or_insert_with(MetaClientOpts::default)
+            opts.meta_client_options
+                .get_or_insert_with(MetaClientOptions::default)
                 .metasrv_addrs = metasrv_addr
                 .split(',')
                 .map(&str::trim)
@@ -14,6 +14,7 @@

 #![feature(assert_matches)]

+pub mod cli;
 pub mod datanode;
 pub mod error;
 pub mod frontend;
@@ -17,7 +17,9 @@ use std::sync::Arc;
 use clap::Parser;
 use common_base::Plugins;
 use common_telemetry::info;
-use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig, WalConfig};
+use datanode::datanode::{
+    CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig,
+};
 use datanode::instance::InstanceRef;
 use frontend::frontend::{Frontend, FrontendOptions};
 use frontend::grpc::GrpcOptions;
@@ -66,6 +68,8 @@ impl SubCommand {
 #[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(default)]
 pub struct StandaloneOptions {
+    pub mode: Mode,
+    pub enable_memory_catalog: bool,
     pub http_options: Option<HttpOptions>,
     pub grpc_options: Option<GrpcOptions>,
     pub mysql_options: Option<MysqlOptions>,
@@ -74,15 +78,17 @@ pub struct StandaloneOptions {
     pub influxdb_options: Option<InfluxdbOptions>,
     pub prometheus_options: Option<PrometheusOptions>,
     pub promql_options: Option<PromqlOptions>,
-    pub mode: Mode,
     pub wal: WalConfig,
     pub storage: ObjectStoreConfig,
-    pub enable_memory_catalog: bool,
+    pub compaction: CompactionConfig,
+    pub procedure: Option<ProcedureConfig>,
 }

 impl Default for StandaloneOptions {
     fn default() -> Self {
         Self {
+            mode: Mode::Standalone,
+            enable_memory_catalog: false,
             http_options: Some(HttpOptions::default()),
             grpc_options: Some(GrpcOptions::default()),
             mysql_options: Some(MysqlOptions::default()),
@@ -91,10 +97,10 @@ impl Default for StandaloneOptions {
             influxdb_options: Some(InfluxdbOptions::default()),
             prometheus_options: Some(PrometheusOptions::default()),
             promql_options: Some(PromqlOptions::default()),
-            mode: Mode::Standalone,
             wal: WalConfig::default(),
             storage: ObjectStoreConfig::default(),
-            enable_memory_catalog: false,
+            compaction: CompactionConfig::default(),
+            procedure: None,
         }
     }
 }
@@ -102,6 +108,7 @@ impl Default for StandaloneOptions {
 impl StandaloneOptions {
     fn frontend_options(self) -> FrontendOptions {
         FrontendOptions {
+            mode: self.mode,
             http_options: self.http_options,
             grpc_options: self.grpc_options,
             mysql_options: self.mysql_options,
@@ -110,16 +117,17 @@ impl StandaloneOptions {
             influxdb_options: self.influxdb_options,
             prometheus_options: self.prometheus_options,
             promql_options: self.promql_options,
-            mode: self.mode,
-            meta_client_opts: None,
+            meta_client_options: None,
         }
     }

     fn datanode_options(self) -> DatanodeOptions {
         DatanodeOptions {
+            enable_memory_catalog: self.enable_memory_catalog,
             wal: self.wal,
             storage: self.storage,
-            enable_memory_catalog: self.enable_memory_catalog,
+            compaction: self.compaction,
+            procedure: self.procedure,
             ..Default::default()
         }
     }
@@ -362,4 +370,11 @@ mod tests {
             .await;
         assert!(result.is_ok());
     }
+
+    #[test]
+    fn test_toml() {
+        let opts = StandaloneOptions::default();
+        let toml_string = toml::to_string(&opts).unwrap();
+        let _parsed: StandaloneOptions = toml::from_str(&toml_string).unwrap();
+    }
 }
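
The test_toml test added above guards against config structs that serialize but fail to parse back. A minimal sketch of the same round-trip check with an illustrative struct:

use serde::{Deserialize, Serialize};

#[derive(Debug, Default, PartialEq, Serialize, Deserialize)]
#[serde(default)]
struct Options {
    enable_memory_catalog: bool,
}

fn main() {
    let opts = Options::default();
    let toml_string = toml::to_string(&opts).unwrap();
    let parsed: Options = toml::from_str(&toml_string).unwrap();
    assert_eq!(opts, parsed);
}
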
src/cmd/tests/cli.rs (new file, 145 lines)
@@ -0,0 +1,145 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[cfg(target_os = "macos")]
mod tests {
    use std::path::PathBuf;
    use std::process::{Command, Stdio};
    use std::time::Duration;

    use rexpect::session::PtyReplSession;
    use tempdir::TempDir;

    struct Repl {
        repl: PtyReplSession,
    }

    impl Repl {
        fn send_line(&mut self, line: &str) {
            self.repl.send_line(line).unwrap();

            // Read a line to consume the prompt.
            self.read_line();
        }

        fn read_line(&mut self) -> String {
            self.repl.read_line().unwrap()
        }

        fn read_expect(&mut self, expect: &str) {
            assert_eq!(self.read_line(), expect);
        }

        fn read_contains(&mut self, pat: &str) {
            assert!(self.read_line().contains(pat));
        }
    }

    #[test]
    fn test_repl() {
        let data_dir = TempDir::new_in("/tmp", "data").unwrap();
        let wal_dir = TempDir::new_in("/tmp", "wal").unwrap();

        let mut bin_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
        bin_path.push("../../target/debug");
        let bin_path = bin_path.to_str().unwrap();

        let mut datanode = Command::new("./greptime")
            .current_dir(bin_path)
            .args([
                "datanode",
                "start",
                "--rpc-addr=0.0.0.0:4321",
                "--node-id=1",
                &format!("--data-dir={}", data_dir.path().display()),
                &format!("--wal-dir={}", wal_dir.path().display()),
            ])
            .stdout(Stdio::null())
            .spawn()
            .unwrap();

        // Wait for the datanode to actually start.
        std::thread::sleep(Duration::from_secs(3));

        let mut repl_cmd = Command::new("./greptime");
        repl_cmd.current_dir(bin_path).args([
            "--log-level=off",
            "cli",
            "attach",
            "--grpc-addr=0.0.0.0:4321",
            // History commands can sneak into stdout and mess up our tests, so disable the helper.
            "--disable-helper",
        ]);
        let pty_session = rexpect::session::spawn_command(repl_cmd, Some(5_000)).unwrap();
        let repl = PtyReplSession {
            prompt: "> ".to_string(),
            pty_session,
            quit_command: None,
            echo_on: false,
        };
        let repl = &mut Repl { repl };
        repl.read_expect("Ready for commands. (Hint: try 'help')");

        test_create_database(repl);

        test_use_database(repl);

        test_create_table(repl);

        test_insert(repl);

        test_select(repl);

        datanode.kill().unwrap();
        datanode.wait().unwrap();
    }

    fn test_create_database(repl: &mut Repl) {
        repl.send_line("CREATE DATABASE db;");
        repl.read_expect("Affected Rows: 1");
        repl.read_contains("Cost");
    }

    fn test_use_database(repl: &mut Repl) {
        repl.send_line("USE db");
        repl.read_expect("Total Rows: 0");
        repl.read_contains("Cost");
        repl.read_expect("Using db");
    }

    fn test_create_table(repl: &mut Repl) {
        repl.send_line("CREATE TABLE t(x STRING, ts TIMESTAMP TIME INDEX);");
        repl.read_expect("Affected Rows: 0");
        repl.read_contains("Cost");
    }

    fn test_insert(repl: &mut Repl) {
        repl.send_line("INSERT INTO t(x, ts) VALUES ('hello', 1676895812239);");
        repl.read_expect("Affected Rows: 1");
        repl.read_contains("Cost");
    }

    fn test_select(repl: &mut Repl) {
        repl.send_line("SELECT * FROM t;");

        repl.read_expect("+-------+-------------------------+");
        repl.read_expect("| x     | ts                      |");
        repl.read_expect("+-------+-------------------------+");
        repl.read_expect("| hello | 2023-02-20T12:23:32.239 |");
        repl.read_expect("+-------+-------------------------+");
        repl.read_expect("Total Rows: 1");

        repl.read_contains("Cost");
    }
}
@@ -86,6 +86,34 @@ impl StatusCode {
     pub fn is_success(code: u32) -> bool {
         Self::Success as u32 == code
     }
+
+    pub fn is_retryable(&self) -> bool {
+        match self {
+            StatusCode::StorageUnavailable
+            | StatusCode::RuntimeResourcesExhausted
+            | StatusCode::Internal => true,
+
+            StatusCode::Success
+            | StatusCode::Unknown
+            | StatusCode::Unsupported
+            | StatusCode::Unexpected
+            | StatusCode::InvalidArguments
+            | StatusCode::InvalidSyntax
+            | StatusCode::PlanQuery
+            | StatusCode::EngineExecuteQuery
+            | StatusCode::TableAlreadyExists
+            | StatusCode::TableNotFound
+            | StatusCode::TableColumnNotFound
+            | StatusCode::TableColumnExists
+            | StatusCode::DatabaseNotFound
+            | StatusCode::UserNotFound
+            | StatusCode::UnsupportedPasswordType
+            | StatusCode::UserPasswordMismatch
+            | StatusCode::AuthHeaderNotFound
+            | StatusCode::InvalidAuthHeader
+            | StatusCode::AccessDenied => false,
+        }
+    }
 }

 impl fmt::Display for StatusCode {
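
A hedged sketch (trait and names are stand-ins, not the crate's API) of how a caller might use a retryability predicate like StatusCode::is_retryable to drive a bounded retry loop:

// Stand-in for the crate's ErrorExt + StatusCode combination.
trait RetryableError {
    fn is_retryable(&self) -> bool;
}

fn call_with_retry<T, E, F>(mut op: F, max_retries: usize) -> Result<T, E>
where
    F: FnMut() -> Result<T, E>,
    E: RetryableError,
{
    let mut attempts = 0;
    loop {
        match op() {
            Ok(v) => return Ok(v),
            // Retry transient failures up to the limit.
            Err(e) if e.is_retryable() && attempts < max_retries => attempts += 1,
            // Give up on permanent failures or an exhausted budget.
            Err(e) => return Err(e),
        }
    }
}
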
@@ -18,11 +18,13 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use datatypes::schema::{ColumnSchema, RawSchema};
 use snafu::{ensure, OptionExt, ResultExt};
 use table::metadata::TableId;
-use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest};
+use table::requests::{
+    AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest, TableOptions,
+};

 use crate::error::{
     ColumnNotFoundSnafu, InvalidColumnDefSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu,
-    Result,
+    Result, UnrecognizedTableOptionSnafu,
 };

 /// Convert an [`AlterExpr`] to an [`AlterTableRequest`]
@@ -163,6 +165,8 @@ pub fn create_expr_to_request(
         expr.region_ids
     };

+    let table_options =
+        TableOptions::try_from(&expr.table_options).context(UnrecognizedTableOptionSnafu)?;
     Ok(CreateTableRequest {
         id: table_id,
         catalog_name,
@@ -173,7 +177,7 @@ pub fn create_expr_to_request(
         region_numbers: region_ids,
         primary_key_indices,
         create_if_not_exists: expr.create_if_not_exists,
-        table_options: expr.table_options,
+        table_options,
     })
 }
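
A reduced sketch (types are illustrative) of the conversion step introduced above: raw string table options are parsed into a typed struct up front, so an unrecognized option fails the request instead of being silently stored:

use std::collections::HashMap;

#[derive(Debug, Default)]
struct ParsedOptions {
    write_buffer_size: Option<u64>,
}

impl TryFrom<&HashMap<String, String>> for ParsedOptions {
    type Error = String;

    fn try_from(raw: &HashMap<String, String>) -> Result<Self, Self::Error> {
        let mut opts = ParsedOptions::default();
        if let Some(v) = raw.get("write_buffer_size") {
            let parsed = v
                .parse()
                .map_err(|_| format!("unrecognized table option value: {v}"))?;
            opts.write_buffer_size = Some(parsed);
        }
        Ok(opts)
    }
}
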
@@ -84,6 +84,12 @@ pub enum Error {
         #[snafu(backtrace)]
         source: api::error::Error,
     },
+
+    #[snafu(display("Unrecognized table option: {}", source))]
+    UnrecognizedTableOption {
+        #[snafu(backtrace)]
+        source: table::error::Error,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -104,6 +110,7 @@ impl ErrorExt for Error {
             Error::MissingField { .. } => StatusCode::InvalidArguments,
             Error::ColumnDefaultConstraint { source, .. } => source.status_code(),
             Error::InvalidColumnDef { source, .. } => source.status_code(),
+            Error::UnrecognizedTableOption { .. } => StatusCode::InvalidArguments,
         }
     }
     fn backtrace_opt(&self) -> Option<&Backtrace> {
@@ -419,8 +419,9 @@ fn convert_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
             .into_iter()
             .map(|v| Value::Timestamp(Timestamp::new_millisecond(v)))
             .collect(),
-        ConcreteDataType::Null(_) => unreachable!(),
-        ConcreteDataType::List(_) => unreachable!(),
+        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
+            unreachable!()
+        }
     }
 }
@@ -67,7 +67,7 @@ macro_rules! convert_arrow_array_to_grpc_vals {
                 return Ok(vals);
             },
         )+
-        ConcreteDataType::Null(_) | ConcreteDataType::List(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
+        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
         }
     }};
 }
@@ -69,6 +69,18 @@ pub enum Error {
         key: String,
         source: object_store::Error,
     },
+
+    #[snafu(display("Failed to deserialize from json, source: {}", source))]
+    FromJson {
+        source: serde_json::Error,
+        backtrace: Backtrace,
+    },
+
+    #[snafu(display("Procedure exec failed, source: {}", source))]
+    RetryLater {
+        #[snafu(backtrace)]
+        source: BoxedError,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -81,7 +93,9 @@ impl ErrorExt for Error {
             | Error::PutState { .. }
             | Error::DeleteState { .. }
             | Error::ListState { .. }
-            | Error::ReadState { .. } => StatusCode::Internal,
+            | Error::ReadState { .. }
+            | Error::FromJson { .. }
+            | Error::RetryLater { .. } => StatusCode::Internal,
             Error::LoaderConflict { .. } | Error::DuplicateProcedure { .. } => {
                 StatusCode::InvalidArguments
             }
@@ -104,4 +118,26 @@ impl Error {
             source: BoxedError::new(err),
         }
     }
+
+    /// Creates a new [Error::RetryLater] error from source `err`.
+    pub fn retry_later<E: ErrorExt + Send + Sync + 'static>(err: E) -> Error {
+        Error::RetryLater {
+            source: BoxedError::new(err),
+        }
+    }
+
+    /// Returns true if the error is the retry-later type.
+    pub fn is_retry_later(&self) -> bool {
+        matches!(self, Error::RetryLater { .. })
+    }
+
+    /// Creates a new [Error::RetryLater] or [Error::External] error from source `err` according
+    /// to its [StatusCode].
+    pub fn from_error_ext<E: ErrorExt + Send + Sync + 'static>(err: E) -> Self {
+        if err.status_code().is_retryable() {
+            Error::retry_later(err)
+        } else {
+            Error::external(err)
+        }
+    }
 }
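
A self-contained sketch (illustrative enum, not the crate's Error) of the routing rule from_error_ext encodes: sources with a retryable status become RetryLater, everything else stays External:

#[derive(Debug)]
enum ProcError {
    RetryLater(String),
    External(String),
}

fn from_source(msg: &str, retryable: bool) -> ProcError {
    if retryable {
        // The runner sees a retry-later error and schedules another attempt.
        ProcError::RetryLater(msg.to_string())
    } else {
        ProcError::External(msg.to_string())
    }
}

fn main() {
    println!("{:?}", from_source("storage unavailable", true));
    println!("{:?}", from_source("invalid arguments", false));
}
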
@@ -15,15 +15,12 @@
 //! Common traits and structures for the procedure framework.

 pub mod error;
-#[allow(dead_code)]
-mod local;
+pub mod local;
 mod procedure;
-// TODO(yingwen): Remove this attribute once ProcedureManager is implemented.
-#[allow(dead_code)]
 mod store;

 pub use crate::error::{Error, Result};
 pub use crate::procedure::{
     BoxedProcedure, Context, ContextProvider, LockKey, Procedure, ProcedureId, ProcedureManager,
-    ProcedureManagerRef, ProcedureState, ProcedureWithId, Status,
+    ProcedureManagerRef, ProcedureState, ProcedureWithId, Status, Watcher,
 };
@@ -22,6 +22,7 @@ use async_trait::async_trait;
 use common_telemetry::logging;
 use object_store::ObjectStore;
 use snafu::ensure;
+use tokio::sync::watch::{self, Receiver, Sender};
 use tokio::sync::Notify;

 use crate::error::{DuplicateProcedureSnafu, LoaderConflictSnafu, Result};
@@ -31,27 +32,9 @@ use crate::procedure::BoxedProcedureLoader;
 use crate::store::{ObjectStateStore, ProcedureMessage, ProcedureStore, StateStoreRef};
 use crate::{
     BoxedProcedure, ContextProvider, LockKey, ProcedureId, ProcedureManager, ProcedureState,
-    ProcedureWithId,
+    ProcedureWithId, Watcher,
 };

-/// Mutable metadata of a procedure during execution.
-#[derive(Debug)]
-struct ExecMeta {
-    /// Current procedure state.
-    state: ProcedureState,
-    /// Id of child procedures.
-    children: Vec<ProcedureId>,
-}
-
-impl Default for ExecMeta {
-    fn default() -> ExecMeta {
-        ExecMeta {
-            state: ProcedureState::Running,
-            children: Vec::new(),
-        }
-    }
-}
-
 /// Shared metadata of a procedure.
 ///
 /// # Note
@@ -72,38 +55,55 @@ pub(crate) struct ProcedureMeta {
     child_notify: Notify,
     /// Lock required by this procedure.
     lock_key: LockKey,
-    /// Mutable status during execution.
-    exec_meta: Mutex<ExecMeta>,
+    /// Sender to notify the procedure state.
+    state_sender: Sender<ProcedureState>,
+    /// Receiver to watch the procedure state.
+    state_receiver: Receiver<ProcedureState>,
+    /// Id of child procedures.
+    children: Mutex<Vec<ProcedureId>>,
 }

 impl ProcedureMeta {
+    fn new(id: ProcedureId, parent_id: Option<ProcedureId>, lock_key: LockKey) -> ProcedureMeta {
+        let (state_sender, state_receiver) = watch::channel(ProcedureState::Running);
+        ProcedureMeta {
+            id,
+            lock_notify: Notify::new(),
+            parent_id,
+            child_notify: Notify::new(),
+            lock_key,
+            state_sender,
+            state_receiver,
+            children: Mutex::new(Vec::new()),
+        }
+    }
+
     /// Returns current [ProcedureState].
     fn state(&self) -> ProcedureState {
-        let meta = self.exec_meta.lock().unwrap();
-        meta.state.clone()
+        self.state_receiver.borrow().clone()
     }

     /// Update current [ProcedureState].
     fn set_state(&self, state: ProcedureState) {
-        let mut meta = self.exec_meta.lock().unwrap();
-        meta.state = state;
+        // Safety: ProcedureMeta also holds the receiver, so `send()` should never fail.
+        self.state_sender.send(state).unwrap();
     }

     /// Push `procedure_id` of the subprocedure to the metadata.
     fn push_child(&self, procedure_id: ProcedureId) {
-        let mut meta = self.exec_meta.lock().unwrap();
-        meta.children.push(procedure_id);
+        let mut children = self.children.lock().unwrap();
+        children.push(procedure_id);
     }

     /// Append subprocedures to given `buffer`.
     fn list_children(&self, buffer: &mut Vec<ProcedureId>) {
-        let meta = self.exec_meta.lock().unwrap();
-        buffer.extend_from_slice(&meta.children);
+        let children = self.children.lock().unwrap();
+        buffer.extend_from_slice(&children);
     }

     /// Returns the number of subprocedures.
     fn num_children(&self) -> usize {
-        self.exec_meta.lock().unwrap().children.len()
+        self.children.lock().unwrap().len()
     }
 }
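
A minimal, runnable sketch of the tokio watch-channel pattern ProcedureMeta::new now builds on: the sender publishes each state change, every cloned receiver observes the latest value, and changed() wakes waiters:

use tokio::sync::watch;

#[derive(Clone, Debug, PartialEq)]
enum State {
    Running,
    Done,
}

#[tokio::main]
async fn main() {
    let (tx, rx) = watch::channel(State::Running);
    let mut watcher = rx.clone();

    let waiter = tokio::spawn(async move {
        watcher.changed().await.unwrap();
        assert_eq!(State::Done, *watcher.borrow());
    });

    // send() only fails when every receiver is dropped; `rx` keeps one alive,
    // which mirrors the "ProcedureMeta also holds the receiver" safety comment.
    tx.send(State::Done).unwrap();
    waiter.await.unwrap();
}
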
@@ -123,8 +123,6 @@ pub(crate) struct ManagerContext {
     loaders: Mutex<HashMap<String, BoxedProcedureLoader>>,
     lock_map: LockMap,
     procedures: RwLock<HashMap<ProcedureId, ProcedureMetaRef>>,
-    // TODO(yingwen): Now we never clean the messages. But when the root procedure is done, we
-    // should be able to remove its message and all its child messages.
     /// Messages loaded from the procedure store.
     messages: Mutex<HashMap<ProcedureId, ProcedureMessage>>,
 }
@@ -175,6 +173,14 @@ impl ManagerContext {
         procedures.get(&procedure_id).map(|meta| meta.state())
     }

+    /// Returns the [Watcher] of specific `procedure_id`.
+    fn watcher(&self, procedure_id: ProcedureId) -> Option<Watcher> {
+        let procedures = self.procedures.read().unwrap();
+        procedures
+            .get(&procedure_id)
+            .map(|meta| meta.state_receiver.clone())
+    }
+
     /// Notify a suspended parent procedure with specific `procedure_id` by its subprocedure.
     fn notify_by_subprocedure(&self, procedure_id: ProcedureId) {
         let procedures = self.procedures.read().unwrap();
@@ -284,7 +290,7 @@ impl ManagerContext {
 #[derive(Debug)]
 pub struct ManagerConfig {
     /// Object store
-    object_store: ObjectStore,
+    pub object_store: ObjectStore,
 }

 /// A [ProcedureManager] that maintains procedure states locally.
@@ -308,15 +314,8 @@ impl LocalManager {
         procedure_id: ProcedureId,
         step: u32,
         procedure: BoxedProcedure,
-    ) -> Result<()> {
-        let meta = Arc::new(ProcedureMeta {
-            id: procedure_id,
-            lock_notify: Notify::new(),
-            parent_id: None,
-            child_notify: Notify::new(),
-            lock_key: procedure.lock_key(),
-            exec_meta: Mutex::new(ExecMeta::default()),
-        });
+    ) -> Result<Watcher> {
+        let meta = Arc::new(ProcedureMeta::new(procedure_id, None, procedure.lock_key()));
         let runner = Runner {
             meta: meta.clone(),
             procedure,
@@ -325,6 +324,8 @@ impl LocalManager {
             store: ProcedureStore::new(self.state_store.clone()),
         };

+        let watcher = meta.state_receiver.clone();
+
         // Inserts the meta into the manager before actually spawning the runner.
         ensure!(
             self.manager_ctx.try_insert_procedure(meta),
@@ -336,7 +337,7 @@ impl LocalManager {
             let _ = runner.run().await;
         });

-        Ok(())
+        Ok(watcher)
     }
 }

@@ -351,16 +352,14 @@ impl ProcedureManager for LocalManager {
         Ok(())
     }

-    async fn submit(&self, procedure: ProcedureWithId) -> Result<()> {
+    async fn submit(&self, procedure: ProcedureWithId) -> Result<Watcher> {
         let procedure_id = procedure.id;
         ensure!(
             !self.manager_ctx.contains_procedure(procedure_id),
             DuplicateProcedureSnafu { procedure_id }
         );

-        self.submit_root(procedure.id, 0, procedure.procedure)?;
-
-        Ok(())
+        self.submit_root(procedure.id, 0, procedure.procedure)
     }

     async fn recover(&self) -> Result<()> {
@@ -401,36 +400,36 @@ impl ProcedureManager for LocalManager {
     async fn procedure_state(&self, procedure_id: ProcedureId) -> Result<Option<ProcedureState>> {
         Ok(self.manager_ctx.state(procedure_id))
     }
+
+    fn procedure_watcher(&self, procedure_id: ProcedureId) -> Option<Watcher> {
+        self.manager_ctx.watcher(procedure_id)
+    }
 }
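
A hedged usage sketch of the new submit() contract shown above: the returned Watcher is a watch::Receiver, so a caller can park until the procedure leaves the Running state (error handling elided):

async fn wait_until_settled(manager: &dyn ProcedureManager, procedure: ProcedureWithId) {
    let mut watcher = manager.submit(procedure).await.unwrap();
    // changed() resolves when the runner publishes a new state.
    watcher.changed().await.unwrap();
    assert_eq!(ProcedureState::Done, *watcher.borrow());
}
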
 /// Create a new [ProcedureMeta] for test purpose.
 #[cfg(test)]
 mod test_util {
-    use object_store::services::fs::Builder;
+    use object_store::services::Fs as Builder;
+    use object_store::ObjectStoreBuilder;
     use tempdir::TempDir;

     use super::*;

     pub(crate) fn procedure_meta_for_test() -> ProcedureMeta {
-        ProcedureMeta {
-            id: ProcedureId::random(),
-            lock_notify: Notify::new(),
-            parent_id: None,
-            child_notify: Notify::new(),
-            lock_key: LockKey::default(),
-            exec_meta: Mutex::new(ExecMeta::default()),
-        }
+        ProcedureMeta::new(ProcedureId::random(), None, LockKey::default())
     }

     pub(crate) fn new_object_store(dir: &TempDir) -> ObjectStore {
         let store_dir = dir.path().to_str().unwrap();
         let accessor = Builder::default().root(store_dir).build().unwrap();
-        ObjectStore::new(accessor)
+        ObjectStore::new(accessor).finish()
     }
 }

 #[cfg(test)]
 mod tests {
     use common_error::mock::MockError;
     use common_error::prelude::StatusCode;
     use tempdir::TempDir;

     use super::*;
@@ -500,6 +499,7 @@ mod tests {
     #[derive(Debug)]
     struct ProcedureToLoad {
         content: String,
+        lock_key: LockKey,
     }

     #[async_trait]
@@ -517,7 +517,7 @@ mod tests {
         }

         fn lock_key(&self) -> LockKey {
-            LockKey::default()
+            self.lock_key.clone()
         }
     }

@@ -525,6 +525,7 @@ mod tests {
         fn new(content: &str) -> ProcedureToLoad {
             ProcedureToLoad {
                 content: content.to_string(),
+                lock_key: LockKey::default(),
             }
         }

@@ -608,39 +609,20 @@ mod tests {
         };
         let manager = LocalManager::new(config);

-        #[derive(Debug)]
-        struct MockProcedure {}
-
-        #[async_trait]
-        impl Procedure for MockProcedure {
-            fn type_name(&self) -> &str {
-                "MockProcedure"
-            }
-
-            async fn execute(&mut self, _ctx: &Context) -> Result<Status> {
-                unimplemented!()
-            }
-
-            fn dump(&self) -> Result<String> {
-                unimplemented!()
-            }
-
-            fn lock_key(&self) -> LockKey {
-                LockKey::single("test.submit")
-            }
-        }
-
         let procedure_id = ProcedureId::random();
         assert!(manager
             .procedure_state(procedure_id)
             .await
             .unwrap()
             .is_none());
+        assert!(manager.procedure_watcher(procedure_id).is_none());

+        let mut procedure = ProcedureToLoad::new("submit");
+        procedure.lock_key = LockKey::single("test.submit");
         manager
             .submit(ProcedureWithId {
                 id: procedure_id,
-                procedure: Box::new(MockProcedure {}),
+                procedure: Box::new(procedure),
             })
             .await
             .unwrap();
@@ -649,15 +631,77 @@ mod tests {
             .await
             .unwrap()
             .is_some());
+        // Wait for the procedure to finish.
+        let mut watcher = manager.procedure_watcher(procedure_id).unwrap();
+        watcher.changed().await.unwrap();
+        assert_eq!(ProcedureState::Done, *watcher.borrow());

         // Try to submit procedure with same id again.
         let err = manager
             .submit(ProcedureWithId {
                 id: procedure_id,
-                procedure: Box::new(MockProcedure {}),
+                procedure: Box::new(ProcedureToLoad::new("submit")),
             })
             .await
             .unwrap_err();
         assert!(matches!(err, Error::DuplicateProcedure { .. }), "{err}");
     }
+
+    #[tokio::test]
+    async fn test_state_changed_on_err() {
+        let dir = TempDir::new("on_err").unwrap();
+        let config = ManagerConfig {
+            object_store: test_util::new_object_store(&dir),
+        };
+        let manager = LocalManager::new(config);
+
+        #[derive(Debug)]
+        struct MockProcedure {
+            panic: bool,
+        }
+
+        #[async_trait]
+        impl Procedure for MockProcedure {
+            fn type_name(&self) -> &str {
+                "MockProcedure"
+            }
+
+            async fn execute(&mut self, _ctx: &Context) -> Result<Status> {
+                if self.panic {
+                    // Test that the runner can set the state to failed even if the
+                    // procedure panics.
+                    panic!();
+                } else {
+                    Err(Error::external(MockError::new(StatusCode::Unexpected)))
+                }
+            }
+
+            fn dump(&self) -> Result<String> {
+                Ok(String::new())
+            }
+
+            fn lock_key(&self) -> LockKey {
+                LockKey::single("test.submit")
+            }
+        }
+
+        let check_procedure = |procedure| {
+            async {
+                let procedure_id = ProcedureId::random();
+                let mut watcher = manager
+                    .submit(ProcedureWithId {
+                        id: procedure_id,
+                        procedure: Box::new(procedure),
+                    })
+                    .await
+                    .unwrap();
+                // Wait for the notification.
+                watcher.changed().await.unwrap();
+                assert_eq!(ProcedureState::Failed, *watcher.borrow());
+            }
+        };

+        check_procedure(MockProcedure { panic: false }).await;
+        check_procedure(MockProcedure { panic: true }).await;
+    }
 }
@@ -12,15 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::time::Duration;

 use common_telemetry::logging;
-use tokio::sync::Notify;
 use tokio::time;

 use crate::error::{Error, Result};
-use crate::local::{ExecMeta, ManagerContext, ProcedureMeta, ProcedureMetaRef};
+use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
 use crate::store::ProcedureStore;
 use crate::{BoxedProcedure, Context, ProcedureId, ProcedureState, ProcedureWithId, Status};

@@ -53,6 +52,52 @@ impl ExecResult {
     }
 }

+/// A guard to clean up procedure state.
+struct ProcedureGuard {
+    meta: ProcedureMetaRef,
+    manager_ctx: Arc<ManagerContext>,
+    finish: bool,
+}
+
+impl ProcedureGuard {
+    /// Returns a new [ProcedureGuard].
+    fn new(meta: ProcedureMetaRef, manager_ctx: Arc<ManagerContext>) -> ProcedureGuard {
+        ProcedureGuard {
+            meta,
+            manager_ctx,
+            finish: false,
+        }
+    }
+
+    /// The procedure is finished successfully.
+    fn finish(mut self) {
+        self.finish = true;
+    }
+}
+
+impl Drop for ProcedureGuard {
+    fn drop(&mut self) {
+        if !self.finish {
+            logging::error!("Procedure {} exits unexpectedly", self.meta.id);
+
+            // Set the state to failed. This is useful in tests, as the runtime may not
+            // abort when the runner task panics.
+            // See https://github.com/tokio-rs/tokio/issues/2002 .
+            // We call set_panic_hook() in the application's main function, but our tests
+            // don't have this panic hook.
+            self.meta.set_state(ProcedureState::Failed);
+        }
+
+        // Notify the parent procedure.
+        if let Some(parent_id) = self.meta.parent_id {
+            self.manager_ctx.notify_by_subprocedure(parent_id);
+        }
+
+        // Release locks in reverse order.
+        for key in self.meta.lock_key.keys_to_unlock() {
+            self.manager_ctx.lock_map.release_lock(key, self.meta.id);
+        }
+    }
+}
+
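
A generic, self-contained sketch of the RAII drop-guard idiom ProcedureGuard follows: failure cleanup runs in Drop unless the happy path explicitly disarms the guard, so panics and early returns are still covered:

struct Guard {
    armed: bool,
}

impl Guard {
    fn new() -> Guard {
        Guard { armed: true }
    }

    // Called on the success path to skip the failure handling in Drop.
    fn finish(mut self) {
        self.armed = false;
    }
}

impl Drop for Guard {
    fn drop(&mut self) {
        if self.armed {
            eprintln!("exited unexpectedly, running failure cleanup");
        }
        // Unconditional cleanup (releasing locks, notifying parents) goes here.
    }
}

fn main() {
    let guard = Guard::new();
    // ... work that may panic or return early ...
    guard.finish();
}
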
 // TODO(yingwen): Support cancellation.
 pub(crate) struct Runner {
     pub(crate) meta: ProcedureMetaRef,
@@ -65,6 +110,9 @@ pub(crate) struct Runner {
 impl Runner {
     /// Run the procedure.
     pub(crate) async fn run(mut self) -> Result<()> {
+        // Ensure we can update the procedure state.
+        let guard = ProcedureGuard::new(self.meta.clone(), self.manager_ctx.clone());
+
         logging::info!(
             "Runner {}-{} starts",
             self.procedure.type_name(),
@@ -88,15 +136,13 @@ impl Runner {
             result = Err(e);
         }

-        // Notify parent procedure.
-        if let Some(parent_id) = self.meta.parent_id {
-            self.manager_ctx.notify_by_subprocedure(parent_id);
-        }
+        // We can't remove the metadata of the procedure now as users and its parent might
+        // need to query its state.
+        // TODO(yingwen): 1. Add TTL to the metadata; 2. Only keep state in the procedure store
+        // so we don't need to always store the metadata in memory after the procedure is done.

-        // Release lock in reverse order.
-        for key in self.meta.lock_key.keys_to_unlock() {
-            self.manager_ctx.lock_map.release_lock(key, self.meta.id);
-        }
+        // Release locks and notify the parent procedure.
+        guard.finish();

         // If this is the root procedure, clean up message cache.
         if self.meta.parent_id.is_none() {
@@ -104,11 +150,6 @@ impl Runner {
             self.manager_ctx.remove_messages(&procedure_ids);
         }

-        // We can't remove the metadata of the procedure now as users and its parent might
-        // need to query its state.
-        // TODO(yingwen): 1. Add TTL to the metadata; 2. Only keep state in the procedure store
-        // so we don't need to always store the metadata in memory after the procedure is done.
-
         logging::info!(
             "Runner {}-{} exits",
             self.procedure.type_name(),
@@ -171,11 +212,16 @@ impl Runner {
             Err(e) => {
                 logging::error!(
                     e;
-                    "Failed to execute procedure {}-{}",
+                    "Failed to execute procedure {}-{}, retry: {}",
                     self.procedure.type_name(),
-                    self.meta.id
+                    self.meta.id,
+                    e.is_retry_later(),
                 );

+                if e.is_retry_later() {
+                    return ExecResult::RetryLater;
+                }
+
                 self.meta.set_state(ProcedureState::Failed);

                 // Write rollback key so we can skip this procedure while recovering procedures.
@@ -207,14 +253,11 @@ impl Runner {
             step = loaded_procedure.step;
         }

-        let meta = Arc::new(ProcedureMeta {
-            id: procedure_id,
-            lock_notify: Notify::new(),
-            parent_id: Some(self.meta.id),
-            child_notify: Notify::new(),
-            lock_key: procedure.lock_key(),
-            exec_meta: Mutex::new(ExecMeta::default()),
-        });
+        let meta = Arc::new(ProcedureMeta::new(
+            procedure_id,
+            Some(self.meta.id),
+            procedure.lock_key(),
+        ));
         let runner = Runner {
             meta: meta.clone(),
             procedure,
@@ -256,7 +299,7 @@ impl Runner {
             self.procedure.type_name(),
             self.meta.id,
             subprocedure.procedure.type_name(),
-            subprocedure.id
+            subprocedure.id,
         );

         self.submit_subprocedure(subprocedure.id, subprocedure.procedure);
@@ -338,7 +381,7 @@ impl Runner {
         logging::info!(
             "Procedure {}-{} done",
             self.procedure.type_name(),
-            self.meta.id
+            self.meta.id,
         );

         // Mark the state of this procedure to done.
@@ -667,6 +710,45 @@ mod tests {
         check_files(&object_store, ctx.procedure_id, &["0000000000.rollback"]).await;
     }

+    #[tokio::test]
+    async fn test_execute_on_retry_later_error() {
+        let mut times = 0;
+
+        let exec_fn = move |_| {
+            times += 1;
+            async move {
+                if times == 1 {
+                    Err(Error::retry_later(MockError::new(StatusCode::Unexpected)))
+                } else {
+                    Ok(Status::Done)
+                }
+            }
+            .boxed()
+        };
+
+        let retry_later = ProcedureAdapter {
+            data: "retry_later".to_string(),
+            lock_key: LockKey::single("catalog.schema.table"),
+            exec_fn,
+        };
+
+        let dir = TempDir::new("retry_later").unwrap();
+        let meta = retry_later.new_meta(ROOT_ID);
+        let ctx = context_without_provider(meta.id);
+        let object_store = test_util::new_object_store(&dir);
+        let procedure_store = ProcedureStore::from(object_store.clone());
+        let mut runner = new_runner(meta.clone(), Box::new(retry_later), procedure_store);
+
+        let res = runner.execute_once(&ctx).await;
+        assert!(res.is_retry_later(), "{res:?}");
+        assert_eq!(ProcedureState::Running, meta.state());
+
+        let res = runner.execute_once(&ctx).await;
+        assert!(res.is_done(), "{res:?}");
+        assert_eq!(ProcedureState::Done, meta.state());
+        check_files(&object_store, ctx.procedure_id, &["0000000000.commit"]).await;
+    }
+
     #[tokio::test]
     async fn test_child_error() {
         let mut times = 0;
@@ -699,7 +781,7 @@ mod tests {
             let state = ctx.provider.procedure_state(child_id).await.unwrap();
             if state == Some(ProcedureState::Failed) {
                 // The parent procedure aborts itself if the child procedure failed.
-                Err(Error::external(PlainError::new(
+                Err(Error::from_error_ext(PlainError::new(
                     "subprocedure failed".to_string(),
                     StatusCode::Unexpected,
                 )))
@@ -20,6 +20,7 @@ use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
 use smallvec::{smallvec, SmallVec};
 use snafu::{ResultExt, Snafu};
+use tokio::sync::watch::Receiver;
 use uuid::Uuid;

 use crate::error::Result;
@@ -209,6 +210,9 @@ pub enum ProcedureState {
     Failed,
 }

+/// Watcher to watch procedure state.
+pub type Watcher = Receiver<ProcedureState>;
+
 // TODO(yingwen): Shutdown
 /// `ProcedureManager` executes [Procedure] submitted to it.
 #[async_trait]
@@ -217,7 +221,9 @@ pub trait ProcedureManager: Send + Sync + 'static {
     fn register_loader(&self, name: &str, loader: BoxedProcedureLoader) -> Result<()>;

     /// Submits a procedure to execute.
-    async fn submit(&self, procedure: ProcedureWithId) -> Result<()>;
+    ///
+    /// Returns a [Watcher] to watch the created procedure.
+    async fn submit(&self, procedure: ProcedureWithId) -> Result<Watcher>;

     /// Recovers unfinished procedures and reruns them.
     ///
@@ -228,6 +234,9 @@ pub trait ProcedureManager: Send + Sync + 'static {
     ///
     /// Returns `Ok(None)` if the procedure doesn't exist.
     async fn procedure_state(&self, procedure_id: ProcedureId) -> Result<Option<ProcedureState>>;
+
+    /// Returns a [Watcher] to watch the [ProcedureState] of a specific procedure.
+    fn procedure_watcher(&self, procedure_id: ProcedureId) -> Option<Watcher>;
 }

 /// Ref-counted pointer to the [ProcedureManager].
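
A hedged sketch of a trivial Procedure implementation against the trait surface visible in this diff (type_name/execute/dump/lock_key); the real trait may carry more items:

#[derive(Debug)]
struct NoopProcedure;

#[async_trait]
impl Procedure for NoopProcedure {
    fn type_name(&self) -> &str {
        "NoopProcedure"
    }

    async fn execute(&mut self, _ctx: &Context) -> Result<Status> {
        // A single-step procedure finishes immediately.
        Ok(Status::Done)
    }

    fn dump(&self) -> Result<String> {
        // No state worth persisting for a no-op.
        Ok(String::new())
    }

    fn lock_key(&self) -> LockKey {
        LockKey::single("catalog.schema.table")
    }
}
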
@@ -246,7 +246,8 @@ impl ParsedKey {
 #[cfg(test)]
 mod tests {
     use async_trait::async_trait;
-    use object_store::services::fs::Builder;
+    use object_store::services::Fs as Builder;
+    use object_store::ObjectStoreBuilder;
     use tempdir::TempDir;

     use super::*;
@@ -255,7 +256,7 @@ mod tests {
     fn procedure_store_for_test(dir: &TempDir) -> ProcedureStore {
         let store_dir = dir.path().to_str().unwrap();
         let accessor = Builder::default().root(store_dir).build().unwrap();
-        let object_store = ObjectStore::new(accessor);
+        let object_store = ObjectStore::new(accessor).finish();

         ProcedureStore::from(object_store)
     }
@@ -20,9 +20,7 @@ use futures::{Stream, TryStreamExt};
 use object_store::{ObjectMode, ObjectStore};
 use snafu::ResultExt;

-use crate::error::{
-    DeleteStateSnafu, Error, ListStateSnafu, PutStateSnafu, ReadStateSnafu, Result,
-};
+use crate::error::{DeleteStateSnafu, Error, PutStateSnafu, Result};

 /// Key value from state store.
 type KeyValue = (String, Vec<u8>);
@@ -72,22 +70,23 @@ impl StateStore for ObjectStateStore {

     async fn walk_top_down(&self, path: &str) -> Result<KeyValueStream> {
         let path_string = path.to_string();
-        let op = self.store.batch();
         // Note that there is no guarantee about the order between files and dirs
         // at the same level.
         // See https://docs.rs/opendal/0.25.2/opendal/raw/struct.TopDownWalker.html#note
-        let stream = op
-            .walk_top_down(path)
-            .context(ListStateSnafu { path })?
-            .map_err(move |e| Error::ListState {
+
+        let lister = self
+            .store
+            .object(path)
+            .scan()
+            .await
+            .map_err(|e| Error::ListState {
                 path: path_string.clone(),
                 source: e,
-            })
+            })?;
+
+        let stream = lister
             .try_filter_map(|entry| async move {
                 let key = entry.path();
-                let key_value = match entry.mode().await.context(ReadStateSnafu { key })? {
+                let key_value = match entry.mode().await? {
                     ObjectMode::FILE => {
-                        let value = entry.read().await.context(ReadStateSnafu { key })?;
+                        let value = entry.read().await?;

                         Some((key.to_string(), value))
                     }
@@ -95,6 +94,10 @@ impl StateStore for ObjectStateStore {
                 };

                 Ok(key_value)
             })
+            .map_err(move |e| Error::ListState {
+                path: path_string.clone(),
+                source: e,
+            });

         Ok(Box::pin(stream))
@@ -112,7 +115,8 @@ impl StateStore for ObjectStateStore {

 #[cfg(test)]
 mod tests {
-    use object_store::services::fs::Builder;
+    use object_store::services::Fs as Builder;
+    use object_store::ObjectStoreBuilder;
     use tempdir::TempDir;

     use super::*;
@@ -122,7 +126,7 @@ mod tests {
         let dir = TempDir::new("state_store").unwrap();
         let store_dir = dir.path().to_str().unwrap();
         let accessor = Builder::default().root(store_dir).build().unwrap();
-        let object_store = ObjectStore::new(accessor);
+        let object_store = ObjectStore::new(accessor).finish();
         let state_store = ObjectStateStore::new(object_store);

         let data: Vec<_> = state_store
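
A self-contained sketch of the futures TryStreamExt::try_filter_map combinator the new listing code is built on: Ok(Some(_)) items are kept, Ok(None) items are dropped, and errors pass through:

use futures::stream::{self, TryStreamExt};

#[tokio::main]
async fn main() {
    let input = stream::iter(vec![Ok::<_, String>(1), Ok(2), Ok(3)]);
    let evens: Vec<_> = input
        .try_filter_map(|n| async move { Ok(if n % 2 == 0 { Some(n) } else { None }) })
        .try_collect()
        .await
        .unwrap();
    assert_eq!(vec![2], evens);
}
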
@@ -14,13 +14,13 @@ datafusion.workspace = true
 datafusion-expr.workspace = true
 datatypes = { path = "../../datatypes" }
 futures = "0.3"
-prost = "0.9"
+prost.workspace = true
 snafu.workspace = true
 table = { path = "../../table" }

 [dependencies.substrait_proto]
 package = "substrait"
-version = "0.2"
+version = "0.4"

 [dev-dependencies]
 datatypes = { path = "../../datatypes" }
@@ -15,10 +15,10 @@
 use std::collections::HashMap;

 use datafusion::common::DFSchemaRef;
-use substrait_proto::protobuf::extensions::simple_extension_declaration::{
+use substrait_proto::proto::extensions::simple_extension_declaration::{
     ExtensionFunction, MappingType,
 };
-use substrait_proto::protobuf::extensions::SimpleExtensionDeclaration;
+use substrait_proto::proto::extensions::SimpleExtensionDeclaration;

 #[derive(Default)]
 pub struct ConvertorContext {
@@ -20,15 +20,15 @@ use datafusion_expr::expr::Sort;
 use datafusion_expr::{expr_fn, lit, Between, BinaryExpr, BuiltinScalarFunction, Expr, Operator};
 use datatypes::schema::Schema;
 use snafu::{ensure, OptionExt};
-use substrait_proto::protobuf::expression::field_reference::ReferenceType as FieldReferenceType;
-use substrait_proto::protobuf::expression::reference_segment::{
+use substrait_proto::proto::expression::field_reference::ReferenceType as FieldReferenceType;
+use substrait_proto::proto::expression::reference_segment::{
     ReferenceType as SegReferenceType, StructField,
 };
-use substrait_proto::protobuf::expression::{
+use substrait_proto::proto::expression::{
     FieldReference, Literal, ReferenceSegment, RexType, ScalarFunction,
 };
-use substrait_proto::protobuf::function_argument::ArgType;
-use substrait_proto::protobuf::Expression;
+use substrait_proto::proto::function_argument::ArgType;
+use substrait_proto::proto::Expression;

 use crate::context::ConvertorContext;
 use crate::error::{
@@ -61,6 +61,7 @@ pub(crate) fn to_df_expr(
         | RexType::MultiOrList(_)
         | RexType::Cast(_)
         | RexType::Subquery(_)
+        | RexType::Nested(_)
         | RexType::Enum(_) => UnsupportedExprSnafu {
             name: format!("substrait expression {expr_rex_type:?}"),
         }
@@ -615,9 +616,9 @@ pub fn convert_column(column: &Column, schema: &Schema) -> Result<FieldReference
 /// Some utils special for this `DataFusion::Expr` and `Substrait::Expression` conversion.
 mod utils {
     use datafusion_expr::{BuiltinScalarFunction, Operator};
-    use substrait_proto::protobuf::expression::{RexType, ScalarFunction};
-    use substrait_proto::protobuf::function_argument::ArgType;
-    use substrait_proto::protobuf::{Expression, FunctionArgument};
+    use substrait_proto::proto::expression::{RexType, ScalarFunction};
+    use substrait_proto::proto::function_argument::ArgType;
+    use substrait_proto::proto::{Expression, FunctionArgument};

     pub(crate) fn name_df_operator(op: &Operator) -> &str {
         match op {
@@ -25,13 +25,13 @@ use datafusion::physical_plan::project_schema;
 use datafusion_expr::{Filter, LogicalPlan, TableScan, TableSource};
 use prost::Message;
 use snafu::{ensure, OptionExt, ResultExt};
-use substrait_proto::protobuf::expression::mask_expression::{StructItem, StructSelect};
-use substrait_proto::protobuf::expression::MaskExpression;
-use substrait_proto::protobuf::extensions::simple_extension_declaration::MappingType;
-use substrait_proto::protobuf::plan_rel::RelType as PlanRelType;
-use substrait_proto::protobuf::read_rel::{NamedTable, ReadType};
-use substrait_proto::protobuf::rel::RelType;
-use substrait_proto::protobuf::{FilterRel, Plan, PlanRel, ReadRel, Rel};
+use substrait_proto::proto::expression::mask_expression::{StructItem, StructSelect};
+use substrait_proto::proto::expression::MaskExpression;
+use substrait_proto::proto::extensions::simple_extension_declaration::MappingType;
+use substrait_proto::proto::plan_rel::RelType as PlanRelType;
+use substrait_proto::proto::read_rel::{NamedTable, ReadType};
+use substrait_proto::proto::rel::RelType;
+use substrait_proto::proto::{FilterRel, Plan, PlanRel, ReadRel, Rel};
 use table::table::adapter::DfTableProviderAdapter;

 use crate::context::ConvertorContext;
@@ -424,6 +424,7 @@ impl DFLogicalSubstraitConvertor {
             relations: vec![plan_rel],
             advanced_extensions: None,
             expected_type_urls: vec![],
+            ..Default::default()
         })
     }

@@ -485,6 +486,7 @@ impl DFLogicalSubstraitConvertor {
             projection,
             advanced_extension: None,
             read_type: Some(read_type),
+            ..Default::default()
         };

         Ok(read_rel)
@@ -13,8 +13,8 @@
|
||||
// limitations under the License.
|
||||
|
||||
use datatypes::schema::{ColumnSchema, Schema};
|
||||
use substrait_proto::protobuf::r#type::{Nullability, Struct as SubstraitStruct};
|
||||
use substrait_proto::protobuf::NamedStruct;
|
||||
use substrait_proto::proto::r#type::{Nullability, Struct as SubstraitStruct};
|
||||
use substrait_proto::proto::NamedStruct;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::types::{from_concrete_type, to_concrete_type};
|
||||
|
||||
@@ -20,9 +20,9 @@

use datafusion::scalar::ScalarValue;
use datatypes::prelude::ConcreteDataType;
use substrait_proto::protobuf::expression::literal::LiteralType;
use substrait_proto::protobuf::r#type::{self as s_type, Kind, Nullability};
use substrait_proto::protobuf::{Type as SType, Type};
use substrait_proto::proto::expression::literal::LiteralType;
use substrait_proto::proto::r#type::{self as s_type, Kind, Nullability};
use substrait_proto::proto::{Type as SType, Type};

use crate::error::{self, Result, UnsupportedConcreteTypeSnafu, UnsupportedSubstraitTypeSnafu};

@@ -86,6 +86,7 @@ pub fn to_concrete_type(ty: &SType) -> Result<(ConcreteDataType, bool)> {
| Kind::Struct(_)
| Kind::List(_)
| Kind::Map(_)
| Kind::UserDefined(_)
| Kind::UserDefinedTypeReference(_) => UnsupportedSubstraitTypeSnafu {
ty: format!("{kind:?}"),
}
@@ -131,7 +132,9 @@ pub fn from_concrete_type(ty: ConcreteDataType, nullability: Option<bool>) -> Re
ConcreteDataType::Timestamp(_) => {
build_substrait_kind!(Timestamp, Timestamp, nullability, 0)
}
ConcreteDataType::List(_) => UnsupportedConcreteTypeSnafu { ty }.fail()?,
ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
UnsupportedConcreteTypeSnafu { ty }.fail()?
}
};

Ok(SType { kind })

@@ -13,6 +13,7 @@
// limitations under the License.

use std::any::Any;
use std::num::TryFromIntError;

use chrono::ParseError;
use common_error::ext::ErrorExt;
@@ -24,8 +25,18 @@ use snafu::{Backtrace, ErrorCompat, Snafu};
pub enum Error {
#[snafu(display("Failed to parse string to date, raw: {}, source: {}", raw, source))]
ParseDateStr { raw: String, source: ParseError },

#[snafu(display("Failed to parse a string into Timestamp, raw string: {}", raw))]
ParseTimestamp { raw: String, backtrace: Backtrace },

#[snafu(display("Current timestamp overflow, source: {}", source))]
TimestampOverflow {
source: TryFromIntError,
backtrace: Backtrace,
},

#[snafu(display("Timestamp arithmetic overflow, msg: {}", msg))]
ArithmeticOverflow { msg: String, backtrace: Backtrace },
}

impl ErrorExt for Error {
@@ -34,6 +45,8 @@ impl ErrorExt for Error {
Error::ParseDateStr { .. } | Error::ParseTimestamp { .. } => {
StatusCode::InvalidArguments
}
Error::TimestampOverflow { .. } => StatusCode::Internal,
Error::ArithmeticOverflow { .. } => StatusCode::InvalidArguments,
}
}

@@ -17,12 +17,15 @@ use std::cmp::Ordering;
use std::fmt::{Display, Formatter};
use std::hash::{Hash, Hasher};
use std::str::FromStr;
use std::time::Duration;

use chrono::offset::Local;
use chrono::{DateTime, LocalResult, NaiveDateTime, TimeZone, Utc};
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};

use crate::error::{Error, ParseTimestampSnafu};
use crate::error;
use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu};

#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
pub struct Timestamp {
@@ -31,6 +34,50 @@ pub struct Timestamp {
}

impl Timestamp {
/// Creates a timestamp of the current time in milliseconds.
pub fn current_millis() -> Self {
Self {
value: crate::util::current_time_millis(),
unit: TimeUnit::Millisecond,
}
}

/// Subtracts a duration from the timestamp.
/// # Note
/// The result's time unit remains unchanged even if `duration` has a different unit from `self`.
/// For example, subtracting 1 millisecond from a timestamp of 1 second
/// still yields 1 second.
pub fn sub(&self, duration: Duration) -> error::Result<Self> {
let duration: i64 = match self.unit {
TimeUnit::Second => {
i64::try_from(duration.as_secs()).context(TimestampOverflowSnafu)?
}
TimeUnit::Millisecond => {
i64::try_from(duration.as_millis()).context(TimestampOverflowSnafu)?
}
TimeUnit::Microsecond => {
i64::try_from(duration.as_micros()).context(TimestampOverflowSnafu)?
}
TimeUnit::Nanosecond => {
i64::try_from(duration.as_nanos()).context(TimestampOverflowSnafu)?
}
};

let value = self
.value
.checked_sub(duration)
.with_context(|| ArithmeticOverflowSnafu {
msg: format!(
"Try to subtract timestamp: {:?} with duration: {:?}",
self, duration
),
})?;
Ok(Timestamp {
value,
unit: self.unit,
})
}

pub fn new(value: i64, unit: TimeUnit) -> Self {
Self { unit, value }
}
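
A short sketch (hypothetical, written in the style of this crate's unit tests) of the overflow path that the new `ArithmeticOverflow` variant guards:

#[test]
fn test_sub_underflow() {
    // i64::MIN seconds minus 1 second underflows i64, so `checked_sub`
    // returns `None` and `sub` surfaces `Error::ArithmeticOverflow`.
    let res = Timestamp::new(i64::MIN, TimeUnit::Second).sub(Duration::from_secs(1));
    assert!(res.is_err());
}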
@@ -77,11 +124,11 @@ impl Timestamp {
pub fn convert_to(&self, unit: TimeUnit) -> Option<Timestamp> {
if self.unit().factor() >= unit.factor() {
let mul = self.unit().factor() / unit.factor();
let value = self.value.checked_mul(mul)?;
let value = self.value.checked_mul(mul as i64)?;
Some(Timestamp::new(value, unit))
} else {
let mul = unit.factor() / self.unit().factor();
Some(Timestamp::new(self.value.div_euclid(mul), unit))
Some(Timestamp::new(self.value.div_euclid(mul as i64), unit))
}
}

@@ -92,23 +139,25 @@ impl Timestamp {
pub fn convert_to_ceil(&self, unit: TimeUnit) -> Option<Timestamp> {
if self.unit().factor() >= unit.factor() {
let mul = self.unit().factor() / unit.factor();
let value = self.value.checked_mul(mul)?;
let value = self.value.checked_mul(mul as i64)?;
Some(Timestamp::new(value, unit))
} else {
let mul = unit.factor() / self.unit().factor();
Some(Timestamp::new(self.value.div_ceil(mul), unit))
Some(Timestamp::new(self.value.div_ceil(mul as i64), unit))
}
}

/// Splits a [Timestamp] into a seconds part and a nanoseconds part.
/// Note that the seconds part of the split result is always rounded down (floor).
fn split(&self) -> (i64, i64) {
let sec_mul = TimeUnit::Second.factor() / self.unit.factor();
let nsec_mul = self.unit.factor() / TimeUnit::Nanosecond.factor();
fn split(&self) -> (i64, u32) {
let sec_mul = (TimeUnit::Second.factor() / self.unit.factor()) as i64;
let nsec_mul = (self.unit.factor() / TimeUnit::Nanosecond.factor()) as i64;

let sec_div = self.value.div_euclid(sec_mul);
let sec_mod = self.value.rem_euclid(sec_mul);
(sec_div, sec_mod * nsec_mul)
// safety: the max possible value of `sec_mod` is 999,999,999
let nsec = u32::try_from(sec_mod * nsec_mul).unwrap();
(sec_div, nsec)
}

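A worked sketch of the floor/ceil difference (hypothetical values, in the style of the crate's tests): `div_euclid` floors toward negative infinity and `div_ceil` rounds up, which is exactly why they are used here instead of plain integer division:

let ms = Timestamp::new(1500, TimeUnit::Millisecond);
// 1500 ms floors to 1 s but rounds up to 2 s.
assert_eq!(1, ms.convert_to(TimeUnit::Second).unwrap().value);
assert_eq!(2, ms.convert_to_ceil(TimeUnit::Second).unwrap().value);

let neg = Timestamp::new(-500, TimeUnit::Millisecond);
// -500 ms floors to -1 s and rounds up to 0 s.
assert_eq!(-1, neg.convert_to(TimeUnit::Second).unwrap().value);
assert_eq!(0, neg.convert_to_ceil(TimeUnit::Second).unwrap().value);
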
/// Format timestamp to ISO8601 string. If the timestamp exceeds what chrono timestamp can
@@ -122,15 +171,8 @@ impl Timestamp {
}

pub fn to_chrono_datetime(&self) -> LocalResult<DateTime<Utc>> {
let nano_factor = TimeUnit::Second.factor() / TimeUnit::Nanosecond.factor();
let (mut secs, mut nsecs) = self.split();

if nsecs < 0 {
secs -= 1;
nsecs += nano_factor;
}

Utc.timestamp_opt(secs, nsecs as u32)
let (sec, nsec) = self.split();
Utc.timestamp_opt(sec, nsec)
}
}

@@ -252,7 +294,7 @@ impl Display for TimeUnit {
}

impl TimeUnit {
pub fn factor(&self) -> i64 {
pub fn factor(&self) -> u32 {
match self {
TimeUnit::Second => 1_000_000_000,
TimeUnit::Millisecond => 1_000_000,
@@ -300,7 +342,7 @@ impl Hash for Timestamp {
fn hash<H: Hasher>(&self, state: &mut H) {
let (sec, nsec) = self.split();
state.write_i64(sec);
state.write_i64(nsec);
state.write_u32(nsec);
}
}

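This is why the negative-nanosecond fixup in `to_chrono_datetime` could be dropped: `split` now floors the seconds with `div_euclid` and takes the sub-second remainder with `rem_euclid`, so the nanosecond part always lands in `0..=999_999_999`. A worked example with hypothetical values:

// For -1 ms: div_euclid(-1, 1000) = -1 and rem_euclid(-1, 1000) = 999,
// so split() yields (-1 s, 999_000_000 ns) instead of (0 s, -1_000_000 ns).
let (sec, nsec) = Timestamp::new(-1, TimeUnit::Millisecond).split();
assert_eq!(-1, sec);
assert_eq!(999_000_000, nsec);
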
@@ -789,4 +831,41 @@ mod tests {
Timestamp::new(1, TimeUnit::Second).convert_to_ceil(TimeUnit::Millisecond)
);
}

#[test]
fn test_split_overflow() {
Timestamp::new(i64::MAX, TimeUnit::Second).split();
Timestamp::new(i64::MIN, TimeUnit::Second).split();
Timestamp::new(i64::MAX, TimeUnit::Millisecond).split();
Timestamp::new(i64::MIN, TimeUnit::Millisecond).split();
Timestamp::new(i64::MAX, TimeUnit::Microsecond).split();
Timestamp::new(i64::MIN, TimeUnit::Microsecond).split();
Timestamp::new(i64::MAX, TimeUnit::Nanosecond).split();
Timestamp::new(i64::MIN, TimeUnit::Nanosecond).split();
let (sec, nsec) = Timestamp::new(i64::MIN, TimeUnit::Nanosecond).split();
let time = NaiveDateTime::from_timestamp_opt(sec, nsec).unwrap();
assert_eq!(sec, time.timestamp());
assert_eq!(nsec, time.timestamp_subsec_nanos());
}

#[test]
fn test_timestamp_sub() {
let res = Timestamp::new(1, TimeUnit::Second)
.sub(Duration::from_secs(1))
.unwrap();
assert_eq!(0, res.value);
assert_eq!(TimeUnit::Second, res.unit);

let res = Timestamp::new(0, TimeUnit::Second)
.sub(Duration::from_secs(1))
.unwrap();
assert_eq!(-1, res.value);
assert_eq!(TimeUnit::Second, res.unit);

let res = Timestamp::new(1, TimeUnit::Second)
.sub(Duration::from_millis(1))
.unwrap();
assert_eq!(1, res.value);
assert_eq!(TimeUnit::Second, res.unit);
}
}

@@ -21,6 +21,7 @@ common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-grpc-expr = { path = "../common/grpc-expr" }
common-procedure = { path = "../common/procedure" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
@@ -52,6 +53,7 @@ storage = { path = "../storage" }
store-api = { path = "../store-api" }
substrait = { path = "../common/substrait" }
table = { path = "../table" }
table-procedure = { path = "../table-procedure" }
tokio.workspace = true
tokio-stream = { version = "0.1", features = ["net"] }
tonic.workspace = true
@@ -64,3 +66,4 @@ client = { path = "../client" }
common-query = { path = "../common/query" }
datafusion-common.workspace = true
tempdir = "0.3"
toml = "0.5"

@@ -17,7 +17,7 @@ use std::time::Duration;

use common_base::readable_size::ReadableSize;
use common_telemetry::info;
use meta_client::MetaClientOpts;
use meta_client::MetaClientOptions;
use serde::{Deserialize, Serialize};
use servers::Mode;
use storage::config::EngineConfig as StorageEngineConfig;
@@ -144,38 +144,61 @@ impl From<&DatanodeOptions> for StorageEngineConfig {
}
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct ProcedureConfig {
/// Storage config for procedure manager.
pub store: ObjectStoreConfig,
}

impl Default for ProcedureConfig {
fn default() -> ProcedureConfig {
ProcedureConfig::from_file_path("/tmp/greptimedb/procedure/".to_string())
}
}

impl ProcedureConfig {
pub fn from_file_path(path: String) -> ProcedureConfig {
ProcedureConfig {
store: ObjectStoreConfig::File(FileConfig { data_dir: path }),
}
}
}

#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct DatanodeOptions {
pub mode: Mode,
pub enable_memory_catalog: bool,
pub node_id: Option<u64>,
pub rpc_addr: String,
pub rpc_hostname: Option<String>,
pub rpc_runtime_size: usize,
pub mysql_addr: String,
pub mysql_runtime_size: usize,
pub meta_client_opts: Option<MetaClientOpts>,
pub meta_client_options: Option<MetaClientOptions>,
pub wal: WalConfig,
pub storage: ObjectStoreConfig,
pub enable_memory_catalog: bool,
pub compaction: CompactionConfig,
pub mode: Mode,
pub procedure: Option<ProcedureConfig>,
}

impl Default for DatanodeOptions {
fn default() -> Self {
Self {
mode: Mode::Standalone,
enable_memory_catalog: false,
node_id: None,
rpc_addr: "127.0.0.1:3001".to_string(),
rpc_hostname: None,
rpc_runtime_size: 8,
mysql_addr: "127.0.0.1:4406".to_string(),
mysql_runtime_size: 2,
meta_client_opts: None,
meta_client_options: None,
wal: WalConfig::default(),
storage: ObjectStoreConfig::default(),
enable_memory_catalog: false,
compaction: CompactionConfig::default(),
mode: Mode::Standalone,
procedure: None,
}
}
}
@@ -218,3 +241,15 @@ impl Datanode {
self.instance.clone()
}
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_toml() {
let opts = DatanodeOptions::default();
let toml_string = toml::to_string(&opts).unwrap();
let _parsed: DatanodeOptions = toml::from_str(&toml_string).unwrap();
}
}

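A minimal sketch of enabling the new procedure store from code (all names as defined above; the TOML shape follows from `#[serde(default)]` and is what `test_toml` round-trips):

let opts = DatanodeOptions {
    procedure: Some(ProcedureConfig::from_file_path(
        "/tmp/greptimedb/procedure/".to_string(),
    )),
    ..Default::default()
};
// Serializes to TOML and back without loss.
let toml_string = toml::to_string(&opts).unwrap();
let parsed: DatanodeOptions = toml::from_str(&toml_string).unwrap();
assert!(parsed.procedure.is_some());
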
@@ -15,7 +15,9 @@
use std::any::Any;

use common_error::prelude::*;
use common_procedure::ProcedureId;
use common_recordbatch::error::Error as RecordBatchError;
use datafusion::parquet;
use datatypes::prelude::ConcreteDataType;
use storage::error::Error as StorageError;
use table::error::Error as TableError;
@@ -355,6 +357,69 @@ pub enum Error {
#[snafu(backtrace)]
source: query::error::Error,
},

#[snafu(display("Failed to copy data from table: {}, source: {}", table_name, source))]
CopyTable {
table_name: String,
#[snafu(backtrace)]
source: TableError,
},

#[snafu(display("Failed to execute table scan, source: {}", source))]
TableScanExec {
#[snafu(backtrace)]
source: common_query::error::Error,
},

#[snafu(display("Failed to write parquet file, source: {}", source))]
WriteParquet {
source: parquet::errors::ParquetError,
backtrace: Backtrace,
},

#[snafu(display("Failed to poll stream, source: {}", source))]
PollStream {
source: datatypes::arrow::error::ArrowError,
backtrace: Backtrace,
},

#[snafu(display("Failed to write object into path: {}, source: {}", path, source))]
WriteObject {
path: String,
backtrace: Backtrace,
source: object_store::Error,
},

#[snafu(display("Unrecognized table option: {}", source))]
UnrecognizedTableOption {
#[snafu(backtrace)]
source: table::error::Error,
},

#[snafu(display("Failed to recover procedure, source: {}", source))]
RecoverProcedure {
#[snafu(backtrace)]
source: common_procedure::error::Error,
},

#[snafu(display("Failed to submit procedure, source: {}", source))]
SubmitProcedure {
#[snafu(backtrace)]
source: common_procedure::error::Error,
},

#[snafu(display("Failed to wait procedure done, source: {}", source))]
WaitProcedure {
source: tokio::sync::watch::error::RecvError,
backtrace: Backtrace,
},

// TODO(yingwen): Use procedure's error.
#[snafu(display("Failed to execute procedure, procedure_id: {}", procedure_id))]
ProcedureExec {
procedure_id: ProcedureId,
backtrace: Backtrace,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -417,7 +482,9 @@ impl ErrorExt for Error {
| MissingRequiredField { .. }
| IncorrectInternalState { .. } => StatusCode::Internal,

InitBackend { .. } => StatusCode::StorageUnavailable,
InitBackend { .. } | WriteParquet { .. } | PollStream { .. } | WriteObject { .. } => {
StatusCode::StorageUnavailable
}
OpenLogStore { source } => source.status_code(),
StartScriptManager { source } => source.status_code(),
OpenStorageEngine { source } => source.status_code(),
@@ -426,6 +493,13 @@ impl ErrorExt for Error {
TableIdProviderNotFound { .. } => StatusCode::Unsupported,
BumpTableId { source, .. } => source.status_code(),
ColumnDefaultValue { source, .. } => source.status_code(),
CopyTable { source, .. } => source.status_code(),
TableScanExec { source, .. } => source.status_code(),
UnrecognizedTableOption { .. } => StatusCode::InvalidArguments,
RecoverProcedure { source, .. } | SubmitProcedure { source, .. } => {
source.status_code()
}
WaitProcedure { .. } | ProcedureExec { .. } => StatusCode::Internal,
}
}

@@ -16,25 +16,24 @@ use std::sync::Arc;
use std::time::Duration;
use std::{fs, path};

use backon::ExponentialBackoff;
use catalog::remote::MetaKvBackend;
use catalog::{CatalogManager, CatalogManagerRef, RegisterTableRequest};
use common_base::readable_size::ReadableSize;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_procedure::local::{LocalManager, ManagerConfig};
use common_procedure::ProcedureManagerRef;
use common_telemetry::logging::info;
use log_store::raft_engine::log_store::RaftEngineLogStore;
use log_store::LogConfig;
use meta_client::client::{MetaClient, MetaClientBuilder};
use meta_client::MetaClientOpts;
use meta_client::MetaClientOptions;
use mito::config::EngineConfig as TableEngineConfig;
use mito::engine::MitoEngine;
use object_store::cache_policy::LruCachePolicy;
use object_store::layers::{CacheLayer, LoggingLayer, MetricsLayer, RetryLayer, TracingLayer};
use object_store::services::fs::Builder as FsBuilder;
use object_store::services::oss::Builder as OSSBuilder;
use object_store::services::s3::Builder as S3Builder;
use object_store::{util, ObjectStore};
use object_store::cache_policy::LruCacheLayer;
use object_store::layers::{LoggingLayer, MetricsLayer, RetryLayer, TracingLayer};
use object_store::services::{Fs as FsBuilder, Oss as OSSBuilder, S3 as S3Builder};
use object_store::{util, ObjectStore, ObjectStoreBuilder};
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
use servers::Mode;
use snafu::prelude::*;
@@ -48,11 +47,11 @@ use table::table::TableIdProviderRef;
use table::Table;

use crate::datanode::{
DatanodeOptions, ObjectStoreConfig, WalConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE,
DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE,
};
use crate::error::{
self, CatalogSnafu, MetaClientInitSnafu, MissingMetasrvOptsSnafu, MissingNodeIdSnafu,
NewCatalogSnafu, OpenLogStoreSnafu, Result,
NewCatalogSnafu, OpenLogStoreSnafu, RecoverProcedureSnafu, Result,
};
use crate::heartbeat::HeartbeatTask;
use crate::script::ScriptExecutor;
@@ -86,7 +85,7 @@ impl Instance {
Mode::Distributed => {
let meta_client = new_metasrv_client(
opts.node_id.context(MissingNodeIdSnafu)?,
opts.meta_client_opts
opts.meta_client_options
.as_ref()
.context(MissingMetasrvOptsSnafu)?,
)
@@ -176,12 +175,32 @@ impl Instance {
catalog_manager.clone(),
)),
};

let procedure_manager = create_procedure_manager(&opts.procedure).await?;
// Recover procedures.
if let Some(procedure_manager) = &procedure_manager {
table_engine.register_procedure_loaders(&**procedure_manager);
table_procedure::register_procedure_loaders(
catalog_manager.clone(),
table_engine.clone(),
table_engine.clone(),
&**procedure_manager,
);

procedure_manager
.recover()
.await
.context(RecoverProcedureSnafu)?;
}

Ok(Self {
query_engine: query_engine.clone(),
sql_handler: SqlHandler::new(
table_engine,
table_engine.clone(),
catalog_manager.clone(),
query_engine.clone(),
table_engine,
procedure_manager,
),
catalog_manager,
script_executor,
@@ -227,7 +246,7 @@ pub(crate) async fn new_object_store(store_config: &ObjectStoreConfig) -> Result

object_store.map(|object_store| {
object_store
.layer(RetryLayer::new(ExponentialBackoff::default().with_jitter()))
.layer(RetryLayer::new().with_jitter())
.layer(MetricsLayer)
.layer(LoggingLayer::default())
.layer(TracingLayer)
@@ -258,7 +277,7 @@ pub(crate) async fn new_oss_object_store(store_config: &ObjectStoreConfig) -> Re
config: store_config.clone(),
})?;

create_object_store_with_cache(ObjectStore::new(accessor), store_config)
create_object_store_with_cache(ObjectStore::new(accessor).finish(), store_config)
}

fn create_object_store_with_cache(
@@ -285,13 +304,13 @@ fn create_object_store_with_cache(

if let Some(path) = cache_path {
let cache_store =
ObjectStore::new(FsBuilder::default().root(path).build().with_context(|_| {
error::InitBackendSnafu {
FsBuilder::default()
.root(path)
.build()
.with_context(|_| error::InitBackendSnafu {
config: store_config.clone(),
}
})?);
let policy = LruCachePolicy::new(cache_capacity.0 as usize);
let cache_layer = CacheLayer::new(cache_store).with_policy(policy);
})?;
let cache_layer = LruCacheLayer::new(Arc::new(cache_store), cache_capacity.0 as usize);
Ok(object_store.layer(cache_layer))
} else {
Ok(object_store)
@@ -328,7 +347,7 @@ pub(crate) async fn new_s3_object_store(store_config: &ObjectStoreConfig) -> Res
config: store_config.clone(),
})?;

create_object_store_with_cache(ObjectStore::new(accessor), store_config)
create_object_store_with_cache(ObjectStore::new(accessor).finish(), store_config)
}

pub(crate) async fn new_fs_object_store(store_config: &ObjectStoreConfig) -> Result<ObjectStore> {
@@ -351,11 +370,11 @@ pub(crate) async fn new_fs_object_store(store_config: &ObjectStoreConfig) -> Res
config: store_config.clone(),
})?;

Ok(ObjectStore::new(accessor))
Ok(ObjectStore::new(accessor).finish())
}

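A condensed sketch of the OpenDAL-style construction this commit migrates to: build an accessor, `finish()` it into an `ObjectStore`, then stack layers (names as imported above; the root path is hypothetical):

let accessor = FsBuilder::default()
    .root("/tmp/greptimedb/data")
    .build()
    .unwrap();
// Retry, metrics, logging and tracing are applied as layers on the finished store.
let object_store = ObjectStore::new(accessor)
    .finish()
    .layer(RetryLayer::new().with_jitter())
    .layer(MetricsLayer)
    .layer(LoggingLayer::default())
    .layer(TracingLayer);
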
/// Create metasrv client instance and spawn heartbeat loop.
async fn new_metasrv_client(node_id: u64, meta_config: &MetaClientOpts) -> Result<MetaClient> {
async fn new_metasrv_client(node_id: u64, meta_config: &MetaClientOptions) -> Result<MetaClient> {
let cluster_id = 0; // TODO(hl): read from config
let member_id = node_id;

@@ -403,3 +422,21 @@ pub(crate) async fn create_log_store(wal_config: &WalConfig) -> Result<RaftEngin
.context(OpenLogStoreSnafu)?;
Ok(logstore)
}

async fn create_procedure_manager(
procedure_config: &Option<ProcedureConfig>,
) -> Result<Option<ProcedureManagerRef>> {
let Some(procedure_config) = procedure_config else {
return Ok(None);
};

info!(
"Creating procedure manager with config: {:?}",
procedure_config
);

let object_store = new_object_store(&procedure_config.store).await?;
let manager_config = ManagerConfig { object_store };

Ok(Some(Arc::new(LocalManager::new(manager_config))))
}

@@ -30,8 +30,9 @@ use session::context::{QueryContext, QueryContextRef};
use snafu::prelude::*;
use sql::ast::ObjectName;
use sql::statements::statement::Statement;
use sql::statements::tql::Tql;
use table::engine::TableReference;
use table::requests::{CreateDatabaseRequest, DropTableRequest};
use table::requests::{CopyTableRequest, CreateDatabaseRequest, DropTableRequest};

use crate::error::{self, BumpTableIdSnafu, ExecuteSqlSnafu, Result, TableIdProviderNotFoundSnafu};
use crate::instance::Instance;
@@ -57,10 +58,10 @@ impl Instance {
.await
.context(ExecuteSqlSnafu)
}
QueryStatement::Sql(Statement::Insert(i)) => {
QueryStatement::Sql(Statement::Insert(insert)) => {
let requests = self
.sql_handler
.insert_to_requests(self.catalog_manager.clone(), *i, query_ctx.clone())
.insert_to_requests(self.catalog_manager.clone(), *insert, query_ctx.clone())
.await?;

match requests {
@@ -86,14 +87,14 @@ impl Instance {
}
}
}
QueryStatement::Sql(Statement::Delete(d)) => {
let request = SqlRequest::Delete(*d);
QueryStatement::Sql(Statement::Delete(delete)) => {
let request = SqlRequest::Delete(*delete);
self.sql_handler.execute(request, query_ctx).await
}
QueryStatement::Sql(Statement::CreateDatabase(c)) => {
QueryStatement::Sql(Statement::CreateDatabase(create_database)) => {
let request = CreateDatabaseRequest {
db_name: c.name.to_string(),
create_if_not_exists: c.if_not_exists,
db_name: create_database.name.to_string(),
create_if_not_exists: create_database.if_not_exists,
};

info!("Creating a new database: {}", request.db_name);
@@ -103,7 +104,7 @@ impl Instance {
.await
}

QueryStatement::Sql(Statement::CreateTable(c)) => {
QueryStatement::Sql(Statement::CreateTable(create_table)) => {
let table_id = self
.table_id_provider
.as_ref()
@@ -111,15 +112,15 @@ impl Instance {
.next_table_id()
.await
.context(BumpTableIdSnafu)?;
let _engine_name = c.engine.clone();
let _engine_name = create_table.engine.clone();
// TODO(hl): Select table engine by engine_name

let name = c.name.clone();
let name = create_table.name.clone();
let (catalog, schema, table) = table_idents_to_full_name(&name, query_ctx.clone())?;
let table_ref = TableReference::full(&catalog, &schema, &table);
let request = self
.sql_handler
.create_to_request(table_id, c, &table_ref)?;
let request =
self.sql_handler
.create_to_request(table_id, create_table, &table_ref)?;
let table_id = request.id;
info!("Creating table: {table_ref}, table id = {table_id}",);

@@ -148,27 +149,27 @@ impl Instance {
.execute(SqlRequest::DropTable(req), query_ctx)
.await
}
QueryStatement::Sql(Statement::ShowDatabases(stmt)) => {
QueryStatement::Sql(Statement::ShowDatabases(show_databases)) => {
self.sql_handler
.execute(SqlRequest::ShowDatabases(stmt), query_ctx)
.execute(SqlRequest::ShowDatabases(show_databases), query_ctx)
.await
}
QueryStatement::Sql(Statement::ShowTables(stmt)) => {
QueryStatement::Sql(Statement::ShowTables(show_tables)) => {
self.sql_handler
.execute(SqlRequest::ShowTables(stmt), query_ctx)
.execute(SqlRequest::ShowTables(show_tables), query_ctx)
.await
}
QueryStatement::Sql(Statement::Explain(stmt)) => {
QueryStatement::Sql(Statement::Explain(explain)) => {
self.sql_handler
.execute(SqlRequest::Explain(Box::new(stmt)), query_ctx)
.execute(SqlRequest::Explain(Box::new(explain)), query_ctx)
.await
}
QueryStatement::Sql(Statement::DescribeTable(stmt)) => {
QueryStatement::Sql(Statement::DescribeTable(describe_table)) => {
self.sql_handler
.execute(SqlRequest::DescribeTable(stmt), query_ctx)
.execute(SqlRequest::DescribeTable(describe_table), query_ctx)
.await
}
QueryStatement::Sql(Statement::ShowCreateTable(_stmt)) => {
QueryStatement::Sql(Statement::ShowCreateTable(_show_create_table)) => {
unimplemented!("SHOW CREATE TABLE is not implemented yet");
}
QueryStatement::Sql(Statement::Use(ref schema)) => {
@@ -182,6 +183,49 @@ impl Instance {

Ok(Output::RecordBatches(RecordBatches::empty()))
}
QueryStatement::Sql(Statement::Copy(copy_table)) => {
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(copy_table.table_name(), query_ctx.clone())?;
let file_name = copy_table.file_name().to_string();

let req = CopyTableRequest {
catalog_name,
schema_name,
table_name,
file_name,
};

self.sql_handler
.execute(SqlRequest::CopyTable(req), query_ctx)
.await
}
QueryStatement::Sql(Statement::Tql(tql)) => self.execute_tql(tql, query_ctx).await,
}
}

pub(crate) async fn execute_tql(&self, tql: Tql, query_ctx: QueryContextRef) -> Result<Output> {
match tql {
Tql::Eval(eval) => {
let promql = PromQuery {
start: eval.start,
end: eval.end,
step: eval.step,
query: eval.query,
};
let stmt = QueryLanguageParser::parse_promql(&promql).context(ExecuteSqlSnafu)?;
let logical_plan = self
.query_engine
.statement_to_plan(stmt, query_ctx)
.context(ExecuteSqlSnafu)?;

self.query_engine
.execute(&logical_plan)
.await
.context(ExecuteSqlSnafu)
}
Tql::Explain(_explain) => {
todo!("waiting for the promql-parser AST to add an explain node")
}
}
}

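For reference, the statement surface this dispatch handles is `TQL EVAL (start, end, step) <promql>`; a sketch in the style of the integration tests further below (table and instance setup are assumed):

let output = instance
    .inner()
    .execute_sql(
        "TQL EVAL (0, 100, 10) ceil(http_requests_total{host=\"host1\"})",
        query_ctx.clone(),
    )
    .await
    .unwrap();
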
@@ -60,7 +60,7 @@ impl Instance {
));

// By default, catalog manager and factory are created in standalone mode
let (catalog_manager, factory) = match opts.mode {
let (catalog_manager, factory, heartbeat_task) = match opts.mode {
Mode::Standalone => {
let catalog = Arc::new(
catalog::local::LocalCatalogManager::try_new(table_engine.clone())
@@ -68,7 +68,7 @@ impl Instance {
.context(CatalogSnafu)?,
);
let factory = QueryEngineFactory::new(catalog.clone());
(catalog as CatalogManagerRef, factory)
(catalog as CatalogManagerRef, factory, None)
}
Mode::Distributed => {
let catalog = Arc::new(catalog::remote::RemoteCatalogManager::new(
@@ -79,31 +79,33 @@ impl Instance {
}),
));
let factory = QueryEngineFactory::new(catalog.clone());
(catalog as CatalogManagerRef, factory)
let heartbeat_task = HeartbeatTask::new(
opts.node_id.unwrap_or(42),
opts.rpc_addr.clone(),
None,
meta_client.clone(),
catalog.clone(),
);
(catalog as CatalogManagerRef, factory, Some(heartbeat_task))
}
};
let query_engine = factory.query_engine();
let script_executor =
ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?;

let heartbeat_task = HeartbeatTask::new(
opts.node_id.unwrap_or(42),
opts.rpc_addr.clone(),
None,
meta_client.clone(),
catalog_manager.clone(),
);
Ok(Self {
query_engine: query_engine.clone(),
sql_handler: SqlHandler::new(
table_engine,
table_engine.clone(),
catalog_manager.clone(),
query_engine.clone(),
table_engine,
None,
),
catalog_manager,
script_executor,
table_id_provider: Some(Arc::new(LocalTableIdProvider::default())),
heartbeat_task: Some(heartbeat_task),
heartbeat_task,
})
}
}

@@ -89,6 +89,7 @@ impl Services {
Ok(Self {
grpc_server: GrpcServer::new(
ServerGrpcQueryHandlerAdaptor::arc(instance),
None,
grpc_runtime,
),
mysql_server,

@@ -13,6 +13,7 @@
// limitations under the License.

use catalog::CatalogManagerRef;
use common_procedure::ProcedureManagerRef;
use common_query::Output;
use common_telemetry::error;
use query::query_engine::QueryEngineRef;
@@ -23,7 +24,7 @@ use sql::statements::delete::Delete;
use sql::statements::describe::DescribeTable;
use sql::statements::explain::Explain;
use sql::statements::show::{ShowDatabases, ShowTables};
use table::engine::{EngineContext, TableEngineRef, TableReference};
use table::engine::{EngineContext, TableEngineProcedureRef, TableEngineRef, TableReference};
use table::requests::*;
use table::TableRef;

@@ -31,6 +32,7 @@ use crate::error::{self, ExecuteSqlSnafu, GetTableSnafu, Result, TableNotFoundSn
use crate::instance::sql::table_idents_to_full_name;

mod alter;
mod copy_table;
mod create;
mod delete;
mod drop_table;
@@ -48,6 +50,7 @@ pub enum SqlRequest {
DescribeTable(DescribeTable),
Explain(Box<Explain>),
Delete(Delete),
CopyTable(CopyTableRequest),
}

// Handler to execute SQL statements other than queries
@@ -55,6 +58,8 @@ pub struct SqlHandler {
table_engine: TableEngineRef,
catalog_manager: CatalogManagerRef,
query_engine: QueryEngineRef,
engine_procedure: TableEngineProcedureRef,
procedure_manager: Option<ProcedureManagerRef>,
}

impl SqlHandler {
@@ -62,11 +67,15 @@ impl SqlHandler {
table_engine: TableEngineRef,
catalog_manager: CatalogManagerRef,
query_engine: QueryEngineRef,
engine_procedure: TableEngineProcedureRef,
procedure_manager: Option<ProcedureManagerRef>,
) -> Self {
Self {
table_engine,
catalog_manager,
query_engine,
engine_procedure,
procedure_manager,
}
}

@@ -81,31 +90,30 @@ impl SqlHandler {
SqlRequest::CreateDatabase(req) => self.create_database(req, query_ctx.clone()).await,
SqlRequest::Alter(req) => self.alter(req).await,
SqlRequest::DropTable(req) => self.drop_table(req).await,
SqlRequest::Delete(stmt) => self.delete(query_ctx.clone(), stmt).await,
SqlRequest::ShowDatabases(stmt) => {
show_databases(stmt, self.catalog_manager.clone()).context(ExecuteSqlSnafu)
SqlRequest::Delete(req) => self.delete(query_ctx.clone(), req).await,
SqlRequest::CopyTable(req) => self.copy_table(req).await,
SqlRequest::ShowDatabases(req) => {
show_databases(req, self.catalog_manager.clone()).context(ExecuteSqlSnafu)
}
SqlRequest::ShowTables(stmt) => {
show_tables(stmt, self.catalog_manager.clone(), query_ctx.clone())
SqlRequest::ShowTables(req) => {
show_tables(req, self.catalog_manager.clone(), query_ctx.clone())
.context(ExecuteSqlSnafu)
}
SqlRequest::DescribeTable(stmt) => {
SqlRequest::DescribeTable(req) => {
let (catalog, schema, table) =
table_idents_to_full_name(stmt.name(), query_ctx.clone())?;
table_idents_to_full_name(req.name(), query_ctx.clone())?;
let table = self
.catalog_manager
.table(&catalog, &schema, &table)
.context(error::CatalogSnafu)?
.with_context(|| TableNotFoundSnafu {
table_name: stmt.name().to_string(),
table_name: req.name().to_string(),
})?;
describe_table(table).context(ExecuteSqlSnafu)
}
SqlRequest::Explain(stmt) => {
explain(stmt, self.query_engine.clone(), query_ctx.clone())
.await
.context(ExecuteSqlSnafu)
}
SqlRequest::Explain(req) => explain(req, self.query_engine.clone(), query_ctx.clone())
.await
.context(ExecuteSqlSnafu),
};
if let Err(e) = &result {
error!(e; "{query_ctx}");
@@ -146,8 +154,8 @@ mod tests {
use log_store::NoopLogStore;
use mito::config::EngineConfig as TableEngineConfig;
use mito::engine::MitoEngine;
use object_store::services::fs::Builder;
use object_store::ObjectStore;
use object_store::services::Fs as Builder;
use object_store::{ObjectStore, ObjectStoreBuilder};
use query::parser::{QueryLanguageParser, QueryStatement};
use query::QueryEngineFactory;
use session::context::QueryContext;
@@ -212,7 +220,7 @@ mod tests {
let dir = TempDir::new("setup_test_engine_and_table").unwrap();
let store_dir = dir.path().to_string_lossy();
let accessor = Builder::default().root(&store_dir).build().unwrap();
let object_store = ObjectStore::new(accessor);
let object_store = ObjectStore::new(accessor).finish();
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let sql = r#"insert into demo(host, cpu, memory, ts) values
('host1', 66.6, 1024, 1655276557000),
@@ -249,7 +257,13 @@ mod tests {

let factory = QueryEngineFactory::new(catalog_list.clone());
let query_engine = factory.query_engine();
let sql_handler = SqlHandler::new(table_engine, catalog_list.clone(), query_engine.clone());
let sql_handler = SqlHandler::new(
table_engine.clone(),
catalog_list.clone(),
query_engine.clone(),
table_engine,
None,
);

let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
QueryStatement::Sql(Statement::Insert(i)) => i,

147
src/datanode/src/sql/copy_table.rs
Normal file
@@ -0,0 +1,147 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::pin::Pin;

use common_query::physical_plan::SessionContext;
use common_query::Output;
use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
use datafusion::parquet::arrow::ArrowWriter;
use datafusion::parquet::basic::{Compression, Encoding};
use datafusion::parquet::file::properties::WriterProperties;
use datafusion::physical_plan::RecordBatchStream;
use futures::TryStreamExt;
use object_store::services::Fs as Builder;
use object_store::{ObjectStore, ObjectStoreBuilder};
use snafu::ResultExt;
use table::engine::TableReference;
use table::requests::CopyTableRequest;

use crate::error::{self, Result};
use crate::sql::SqlHandler;

impl SqlHandler {
pub(crate) async fn copy_table(&self, req: CopyTableRequest) -> Result<Output> {
let table_ref = TableReference {
catalog: &req.catalog_name,
schema: &req.schema_name,
table: &req.table_name,
};
let table = self.get_table(&table_ref)?;

let stream = table
.scan(None, &[], None)
.await
.with_context(|_| error::CopyTableSnafu {
table_name: table_ref.to_string(),
})?;

let stream = stream
.execute(0, SessionContext::default().task_ctx())
.context(error::TableScanExecSnafu)?;
let stream = Box::pin(DfRecordBatchStreamAdapter::new(stream));

let accessor = Builder::default().build().unwrap();
let object_store = ObjectStore::new(accessor).finish();

let mut parquet_writer = ParquetWriter::new(req.file_name, stream, object_store);
// TODO(jiachun):
// For now, COPY is implemented synchronously.
// When copying a large table, it will block for a long time.
// Maybe we should make "copy" run in the background?
// Like PG: https://www.postgresql.org/docs/current/sql-copy.html
let rows = parquet_writer.flush().await?;

Ok(Output::AffectedRows(rows))
}
}

type DfRecordBatchStream = Pin<Box<DfRecordBatchStreamAdapter>>;

struct ParquetWriter {
file_name: String,
stream: DfRecordBatchStream,
object_store: ObjectStore,
max_row_group_size: usize,
max_rows_in_segment: usize,
}

impl ParquetWriter {
pub fn new(file_name: String, stream: DfRecordBatchStream, object_store: ObjectStore) -> Self {
Self {
file_name,
stream,
object_store,
// TODO(jiachun): make these configurable: WITH (max_row_group_size=xxx, max_rows_in_segment=xxx)
max_row_group_size: 4096,
max_rows_in_segment: 5000000, // default 5M rows per segment
}
}

pub async fn flush(&mut self) -> Result<usize> {
let schema = self.stream.as_ref().schema();
let writer_props = WriterProperties::builder()
.set_compression(Compression::ZSTD)
.set_encoding(Encoding::PLAIN)
.set_max_row_group_size(self.max_row_group_size)
.build();
let mut total_rows = 0;
loop {
let mut buf = vec![];
let mut arrow_writer =
ArrowWriter::try_new(&mut buf, schema.clone(), Some(writer_props.clone()))
.context(error::WriteParquetSnafu)?;

let mut rows = 0;
let mut end_loop = true;
// TODO(hl & jiachun): Since OpenDAL's writer is async and ArrowWriter requires a `std::io::Write`,
// here we buffer all parquet bytes in a Vec<u8> in memory and write them to the object store
// in one shot. Maybe we should find a better way to bridge ArrowWriter and OpenDAL's object.
while let Some(batch) = self
.stream
.try_next()
.await
.context(error::PollStreamSnafu)?
{
arrow_writer
.write(&batch)
.context(error::WriteParquetSnafu)?;
rows += batch.num_rows();
if rows >= self.max_rows_in_segment {
end_loop = false;
break;
}
}

let start_row_num = total_rows + 1;
total_rows += rows;
arrow_writer.close().context(error::WriteParquetSnafu)?;

// If rows == 0, we just end up with an empty file.
//
// file_name like:
// "file_name_1_1000000" (row num: 1 ~ 1000000),
// "file_name_1000001_xxx" (row num: 1000001 ~ xxx)
let file_name = format!("{}_{}_{}", self.file_name, start_row_num, total_rows);
let object = self.object_store.object(&file_name);
object.write(buf).await.context(error::WriteObjectSnafu {
path: object.path(),
})?;

if end_loop {
return Ok(total_rows);
}
}
}
}
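
A minimal sketch of driving the new COPY path directly (hypothetical request values; `CopyTableRequest` and `copy_table` are as defined above). Because the writer segments its output, files are suffixed with start/end row numbers, e.g. `demo_parquet_1_5000000`:

let req = CopyTableRequest {
    catalog_name: "greptime".to_string(),
    schema_name: "public".to_string(),
    table_name: "demo".to_string(),
    file_name: "demo_parquet".to_string(),
};
// Returns Output::AffectedRows(total_rows) once all segments are flushed.
let output = sql_handler.copy_table(req).await?;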
@@ -15,24 +15,26 @@
use std::collections::HashMap;

use catalog::{RegisterSchemaRequest, RegisterTableRequest};
use common_procedure::{ProcedureManagerRef, ProcedureState, ProcedureWithId};
use common_query::Output;
use common_telemetry::tracing::info;
use common_telemetry::tracing::log::error;
use common_telemetry::tracing::{error, info};
use datatypes::schema::RawSchema;
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use sql::ast::{ColumnOption, TableConstraint};
use sql::ast::{ColumnOption, SqlOption, TableConstraint, Value};
use sql::statements::column_def_to_schema;
use sql::statements::create::CreateTable;
use store_api::storage::consts::TIME_INDEX_NAME;
use table::engine::{EngineContext, TableReference};
use table::metadata::TableId;
use table::requests::*;
use table_procedure::CreateTableProcedure;

use crate::error::{
self, CatalogNotFoundSnafu, CatalogSnafu, ConstraintNotSupportedSnafu, CreateTableSnafu,
IllegalPrimaryKeysDefSnafu, InsertSystemCatalogSnafu, KeyColumnNotFoundSnafu,
RegisterSchemaSnafu, Result, SchemaExistsSnafu, SchemaNotFoundSnafu,
ProcedureExecSnafu, RegisterSchemaSnafu, Result, SchemaExistsSnafu, SchemaNotFoundSnafu,
SubmitProcedureSnafu, UnrecognizedTableOptionSnafu, WaitProcedureSnafu,
};
use crate::sql::SqlHandler;

@@ -71,6 +73,10 @@ impl SqlHandler {
}

pub(crate) async fn create_table(&self, req: CreateTableRequest) -> Result<Output> {
if let Some(procedure_manager) = &self.procedure_manager {
return self.create_table_by_procedure(procedure_manager, req).await;
}

let ctx = EngineContext {};
// first check if catalog and schema exist
let catalog = self
@@ -126,6 +132,43 @@ impl SqlHandler {
Ok(Output::AffectedRows(0))
}

pub(crate) async fn create_table_by_procedure(
&self,
procedure_manager: &ProcedureManagerRef,
req: CreateTableRequest,
) -> Result<Output> {
let table_name = req.table_name.clone();
let procedure = CreateTableProcedure::new(
req,
self.catalog_manager.clone(),
self.table_engine.clone(),
self.engine_procedure.clone(),
);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
let procedure_id = procedure_with_id.id;

info!("Create table {} by procedure {}", table_name, procedure_id);

let mut watcher = procedure_manager
.submit(procedure_with_id)
.await
.context(SubmitProcedureSnafu)?;

// TODO(yingwen): Wrap this into a function and add error to ProcedureState::Failed.
loop {
watcher.changed().await.context(WaitProcedureSnafu)?;
match *watcher.borrow() {
ProcedureState::Running => (),
ProcedureState::Done => {
return Ok(Output::AffectedRows(0));
}
ProcedureState::Failed => {
return ProcedureExecSnafu { procedure_id }.fail();
}
}
}
}

/// Converts [CreateTable] to [SqlRequest::CreateTable].
pub(crate) fn create_to_request(
&self,
@@ -238,6 +281,7 @@ impl SqlHandler {
})
.collect::<Result<Vec<_>>>()?;

let table_options = stmt_options_to_table_options(&stmt.options)?;
let schema = RawSchema::new(columns_schemas);
let request = CreateTableRequest {
id: table_id,
@@ -249,16 +293,32 @@ impl SqlHandler {
region_numbers: vec![0],
primary_key_indices: primary_keys,
create_if_not_exists: stmt.if_not_exists,
table_options: HashMap::new(),
table_options,
};
Ok(request)
}
}

fn stmt_options_to_table_options(opts: &[SqlOption]) -> error::Result<TableOptions> {
let mut map = HashMap::with_capacity(opts.len());
for SqlOption { name, value } in opts {
let value_str = match value {
Value::SingleQuotedString(s) => s.clone(),
Value::DoubleQuotedString(s) => s.clone(),
_ => value.to_string(),
};
map.insert(name.value.clone(), value_str);
}
let options = TableOptions::try_from(&map).context(UnrecognizedTableOptionSnafu)?;
Ok(options)
}

#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::time::Duration;

use common_base::readable_size::ReadableSize;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::Schema;
use sql::dialect::GenericDialect;
@@ -280,6 +340,23 @@ mod tests {
}
}

#[tokio::test]
async fn test_create_table_with_options() {
let sql = r#"CREATE TABLE demo_table (timestamp BIGINT TIME INDEX, value DOUBLE, host STRING PRIMARY KEY) engine=mito with(regions=1, ttl='7days',write_buffer_size='32MB',some='other');"#;
let parsed_stmt = sql_to_statement(sql);
let handler = create_mock_sql_handler().await;
let c = handler
.create_to_request(42, parsed_stmt, &TableReference::bare("demo_table"))
.unwrap();

assert_eq!(Some(Duration::from_secs(604800)), c.table_options.ttl);
assert_eq!(
Some(ReadableSize::mb(32)),
c.table_options.write_buffer_size
);
assert_eq!("other", c.table_options.extra_options.get("some").unwrap());
}

#[tokio::test]
pub async fn test_create_with_inline_primary_key() {
let handler = create_mock_sql_handler().await;

@@ -53,6 +53,75 @@ async fn create_insert_query_assert(
check_unordered_output_stream(query_output, expected).await;
}

#[allow(clippy::too_many_arguments)]
async fn create_insert_tql_assert(create: &str, insert: &str, tql: &str, expected: &str) {
let instance = setup_test_instance("test_execute_insert").await;
let query_ctx = QueryContext::arc();
instance
.inner()
.execute_sql(create, query_ctx.clone())
.await
.unwrap();

instance
.inner()
.execute_sql(insert, query_ctx.clone())
.await
.unwrap();

let query_output = instance
.inner()
.execute_sql(tql, query_ctx.clone())
.await
.unwrap();
let expected = String::from(expected);
check_unordered_output_stream(query_output, expected).await;
}

#[tokio::test(flavor = "multi_thread")]
async fn sql_insert_tql_query_ceil() {
create_insert_tql_assert(
r#"create table http_requests_total (
host string,
cpu double,
memory double,
ts timestamp TIME INDEX,
PRIMARY KEY (host),
);"#,
r#"insert into http_requests_total(host, cpu, memory, ts) values
('host1', 66.6, 1024, 0),
('host1', 66.6, 2048, 2000),
('host1', 66.6, 4096, 5000),
('host1', 43.1, 8192, 7000),
('host1', 19.1, 10240, 9000),
('host1', 99.1, 20480, 10000),
('host1', 999.9, 40960, 21000),
('host1', 31.9, 8192, 22000),
('host1', 95.4, 333.3, 32000),
('host1', 12423.1, 1333.3, 49000),
('host1', 0, 2333.3, 80000),
('host1', 49, 3333.3, 99000);
"#,
"TQL EVAL (0,100,10) ceil(http_requests_total{host=\"host1\"})",
"+---------------------+-----------+--------------+-------+\
\n| ts | ceil(cpu) | ceil(memory) | host |\
\n+---------------------+-----------+--------------+-------+\
\n| 1970-01-01T00:00:00 | 67 | 1024 | host1 |\
\n| 1970-01-01T00:00:10 | 100 | 20480 | host1 |\
\n| 1970-01-01T00:00:20 | 100 | 20480 | host1 |\
\n| 1970-01-01T00:00:30 | 32 | 8192 | host1 |\
\n| 1970-01-01T00:00:40 | 96 | 334 | host1 |\
\n| 1970-01-01T00:00:50 | 12424 | 1334 | host1 |\
\n| 1970-01-01T00:01:00 | 12424 | 1334 | host1 |\
\n| 1970-01-01T00:01:10 | 12424 | 1334 | host1 |\
\n| 1970-01-01T00:01:20 | 0 | 2334 | host1 |\
\n| 1970-01-01T00:01:30 | 0 | 2334 | host1 |\
\n| 1970-01-01T00:01:40 | 49 | 3334 | host1 |\
\n+---------------------+-----------+--------------+-------+",
)
.await;
}

#[tokio::test(flavor = "multi_thread")]
async fn sql_insert_promql_query_ceil() {
create_insert_query_assert(
@@ -82,16 +151,16 @@ async fn sql_insert_promql_query_ceil() {
UNIX_EPOCH.checked_add(Duration::from_secs(100)).unwrap(),
Duration::from_secs(5),
Duration::from_secs(1),
"+---------------------+-------------------------------+----------------------------------+\
\n| ts | ceil(http_requests_total.cpu) | ceil(http_requests_total.memory) |\
\n+---------------------+-------------------------------+----------------------------------+\
\n| 1970-01-01T00:00:00 | 67 | 1024 |\
\n| 1970-01-01T00:00:05 | 67 | 4096 |\
\n| 1970-01-01T00:00:10 | 100 | 20480 |\
\n| 1970-01-01T00:00:50 | 12424 | 1334 |\
\n| 1970-01-01T00:01:20 | 0 | 2334 |\
\n| 1970-01-01T00:01:40 | 49 | 3334 |\
\n+---------------------+-------------------------------+----------------------------------+",
"+---------------------+-----------+--------------+-------+\
\n| ts | ceil(cpu) | ceil(memory) | host |\
\n+---------------------+-----------+--------------+-------+\
\n| 1970-01-01T00:00:00 | 67 | 1024 | host1 |\
\n| 1970-01-01T00:00:05 | 67 | 4096 | host1 |\
\n| 1970-01-01T00:00:10 | 100 | 20480 | host1 |\
\n| 1970-01-01T00:00:50 | 12424 | 1334 | host1 |\
\n| 1970-01-01T00:01:20 | 0 | 2334 | host1 |\
\n| 1970-01-01T00:01:40 | 49 | 3334 | host1 |\
\n+---------------------+-----------+--------------+-------+",
)
.await;
}
@@ -142,13 +211,12 @@ async fn aggregators_simple_sum() {
|
||||
unix_epoch_plus_100s(),
|
||||
Duration::from_secs(60),
|
||||
Duration::from_secs(0),
|
||||
"+------------+--------------------------+\
|
||||
\n| group | SUM(http_requests.value) |\
|
||||
\n+------------+--------------------------+\
|
||||
\n| | |\
|
||||
\n| canary | 700 |\
|
||||
\n| production | 300 |\
|
||||
\n+------------+--------------------------+",
|
||||
"+------------+---------------------+--------------------------+\
|
||||
\n| group | ts | SUM(http_requests.value) |\
|
||||
\n+------------+---------------------+--------------------------+\
|
||||
\n| production | 1970-01-01T00:00:00 | 300 |\
|
||||
\n| canary | 1970-01-01T00:00:00 | 700 |\
|
||||
\n+------------+---------------------+--------------------------+",
|
||||
)
|
||||
.await;
|
||||
}
|
||||
@@ -167,13 +235,12 @@ async fn aggregators_simple_avg() {
        unix_epoch_plus_100s(),
        Duration::from_secs(60),
        Duration::from_secs(0),
        "+------------+--------------------------+\
\n| group      | AVG(http_requests.value) |\
\n+------------+--------------------------+\
\n|            | 0                        |\
\n| production | 150                      |\
\n| canary     | 350                      |\
\n+------------+--------------------------+",
        "+------------+---------------------+--------------------------+\
\n| group      | ts                  | AVG(http_requests.value) |\
\n+------------+---------------------+--------------------------+\
\n| production | 1970-01-01T00:00:00 | 150                      |\
\n| canary     | 1970-01-01T00:00:00 | 350                      |\
\n+------------+---------------------+--------------------------+",
    )
    .await;
}
@@ -192,13 +259,12 @@ async fn aggregators_simple_count() {
        unix_epoch_plus_100s(),
        Duration::from_secs(60),
        Duration::from_secs(0),
        "+------------+----------------------------+\
\n| group      | COUNT(http_requests.value) |\
\n+------------+----------------------------+\
\n|            | 0                          |\
\n| canary     | 2                          |\
\n| production | 2                          |\
\n+------------+----------------------------+",
        "+------------+---------------------+----------------------------+\
\n| group      | ts                  | COUNT(http_requests.value) |\
\n+------------+---------------------+----------------------------+\
\n| canary     | 1970-01-01T00:00:00 | 2                          |\
\n| production | 1970-01-01T00:00:00 | 2                          |\
\n+------------+---------------------+----------------------------+",
    )
    .await;
}
@@ -217,13 +283,12 @@ async fn aggregators_simple_without() {
        unix_epoch_plus_100s(),
        Duration::from_secs(60),
        Duration::from_secs(0),
        "+------------+------------+--------------------------+\
\n| group      | job        | SUM(http_requests.value) |\
\n+------------+------------+--------------------------+\
\n|            |            |                          |\
\n| canary     | api-server | 700                      |\
\n| production | api-server | 300                      |\
\n+------------+------------+--------------------------+",
        "+------------+------------+---------------------+--------------------------+\
\n| group      | job        | ts                  | SUM(http_requests.value) |\
\n+------------+------------+---------------------+--------------------------+\
\n| production | api-server | 1970-01-01T00:00:00 | 300                      |\
\n| canary     | api-server | 1970-01-01T00:00:00 | 700                      |\
\n+------------+------------+---------------------+--------------------------+",
    )
    .await;
}
@@ -241,11 +306,11 @@ async fn aggregators_empty_by() {
        unix_epoch_plus_100s(),
        Duration::from_secs(60),
        Duration::from_secs(0),
        "+--------------------------+\
\n| SUM(http_requests.value) |\
\n+--------------------------+\
\n| 1000                     |\
\n+--------------------------+",
        "+---------------------+--------------------------+\
\n| ts                  | SUM(http_requests.value) |\
\n+---------------------+--------------------------+\
\n| 1970-01-01T00:00:00 | 1000                     |\
\n+---------------------+--------------------------+",
    )
    .await;
}
@@ -263,11 +328,11 @@ async fn aggregators_no_by_without() {
        unix_epoch_plus_100s(),
        Duration::from_secs(60),
        Duration::from_secs(0),
        "+--------------------------+\
\n| SUM(http_requests.value) |\
\n+--------------------------+\
\n| 1000                     |\
\n+--------------------------+",
        "+---------------------+--------------------------+\
\n| ts                  | SUM(http_requests.value) |\
\n+---------------------+--------------------------+\
\n| 1970-01-01T00:00:00 | 1000                     |\
\n+---------------------+--------------------------+",
    )
    .await;
}
@@ -286,13 +351,12 @@ async fn aggregators_empty_without() {
        unix_epoch_plus_100s(),
        Duration::from_secs(60),
        Duration::from_secs(0),
        "+------------+----------+------------+--------------------------+\
\n| group      | instance | job        | SUM(http_requests.value) |\
\n+------------+----------+------------+--------------------------+\
\n|            |          |            |                          |\
\n| production | 0        | api-server | 100                      |\
\n| production | 1        | api-server | 200                      |\
\n+------------+----------+------------+--------------------------+",
        "+------------+----------+------------+---------------------+--------------------------+\
\n| group      | instance | job        | ts                  | SUM(http_requests.value) |\
\n+------------+----------+------------+---------------------+--------------------------+\
\n| production | 0        | api-server | 1970-01-01T00:00:00 | 100                      |\
\n| production | 1        | api-server | 1970-01-01T00:00:00 | 200                      |\
\n+------------+----------+------------+---------------------+--------------------------+",
    )
    .await;
}
@@ -356,11 +420,12 @@ async fn stddev_by_label() {
        unix_epoch_plus_100s(),
        Duration::from_secs(60),
        Duration::from_secs(0),
        "+----------+-----------------------------+\
\n| instance | STDDEV(http_requests.value) |\
\n+----------+-----------------------------+\
\n| 0        | 258.19888974716116          |\
\n+----------+-----------------------------+",
        "+----------+---------------------+-----------------------------+\
\n| instance | ts                  | STDDEV(http_requests.value) |\
\n+----------+---------------------+-----------------------------+\
\n| 0        | 1970-01-01T00:00:00 | 258.19888974716116          |\
\n| 1        | 1970-01-01T00:00:00 | 258.19888974716116          |\
\n+----------+---------------------+-----------------------------+",
    )
    .await;
}

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::sync::Arc;

use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
@@ -26,7 +25,7 @@ use query::QueryEngineFactory;
use servers::Mode;
use snafu::ResultExt;
use table::engine::{EngineContext, TableEngineRef};
use table::requests::CreateTableRequest;
use table::requests::{CreateTableRequest, TableOptions};
use tempdir::TempDir;

use crate::datanode::{DatanodeOptions, FileConfig, ObjectStoreConfig, WalConfig};
@@ -107,7 +106,7 @@ pub(crate) async fn create_test_table(
            schema: RawSchema::new(column_schemas),
            create_if_not_exists: true,
            primary_key_indices: vec![0], // "host" is in primary keys
            table_options: HashMap::new(),
            table_options: TableOptions::default(),
            region_numbers: vec![0],
        },
    )
@@ -141,7 +140,13 @@ pub async fn create_mock_sql_handler() -> SqlHandler {
    let catalog_list = catalog::local::new_memory_catalog_list().unwrap();
    let factory = QueryEngineFactory::new(catalog_list);

    SqlHandler::new(mock_engine, catalog_manager, factory.query_engine())
    SqlHandler::new(
        mock_engine.clone(),
        catalog_manager,
        factory.query_engine(),
        mock_engine,
        None,
    )
}

pub(crate) async fn setup_test_instance(test_name: &str) -> MockInstance {

@@ -22,10 +22,10 @@ use serde::{Deserialize, Serialize};
use crate::error::{self, Error, Result};
use crate::type_id::LogicalTypeId;
use crate::types::{
    BinaryType, BooleanType, DateTimeType, DateType, Float32Type, Float64Type, Int16Type,
    Int32Type, Int64Type, Int8Type, ListType, NullType, StringType, TimestampMicrosecondType,
    TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType, TimestampType,
    UInt16Type, UInt32Type, UInt64Type, UInt8Type,
    BinaryType, BooleanType, DateTimeType, DateType, DictionaryType, Float32Type, Float64Type,
    Int16Type, Int32Type, Int64Type, Int8Type, ListType, NullType, StringType,
    TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType,
    TimestampSecondType, TimestampType, UInt16Type, UInt32Type, UInt64Type, UInt8Type,
};
use crate::value::Value;
use crate::vectors::MutableVector;
@@ -59,6 +59,7 @@ pub enum ConcreteDataType {

    // Compound types:
    List(ListType),
    Dictionary(DictionaryType),
}

// TODO(yingwen): Refactor these `is_xxx()` methods, such as adding a `properties()` method
@@ -169,6 +170,11 @@ impl TryFrom<&ArrowDataType> for ConcreteDataType {
            ArrowDataType::List(field) => Self::List(ListType::new(
                ConcreteDataType::from_arrow_type(field.data_type()),
            )),
            ArrowDataType::Dictionary(key_type, value_type) => {
                let key_type = ConcreteDataType::from_arrow_type(key_type);
                let value_type = ConcreteDataType::from_arrow_type(value_type);
                Self::Dictionary(DictionaryType::new(key_type, value_type))
            }
            _ => {
                return error::UnsupportedArrowTypeSnafu {
                    arrow_type: dt.clone(),
@@ -243,6 +249,13 @@ impl ConcreteDataType {
    pub fn list_datatype(item_type: ConcreteDataType) -> ConcreteDataType {
        ConcreteDataType::List(ListType::new(item_type))
    }

    pub fn dictionary_datatype(
        key_type: ConcreteDataType,
        value_type: ConcreteDataType,
    ) -> ConcreteDataType {
        ConcreteDataType::Dictionary(DictionaryType::new(key_type, value_type))
    }
}

/// Data type abstraction.

@@ -48,6 +48,7 @@ pub enum LogicalTypeId {
    TimestampNanosecond,

    List,
    Dictionary,
}

impl LogicalTypeId {
@@ -88,6 +89,10 @@ impl LogicalTypeId {
            LogicalTypeId::List => {
                ConcreteDataType::list_datatype(ConcreteDataType::null_datatype())
            }
            LogicalTypeId::Dictionary => ConcreteDataType::dictionary_datatype(
                ConcreteDataType::null_datatype(),
                ConcreteDataType::null_datatype(),
            ),
        }
    }
}

@@ -16,17 +16,18 @@ mod binary_type;
mod boolean_type;
mod date_type;
mod datetime_type;
mod dictionary_type;
mod list_type;
mod null_type;
mod primitive_type;
mod string_type;

mod timestamp_type;

pub use binary_type::BinaryType;
pub use boolean_type::BooleanType;
pub use date_type::DateType;
pub use datetime_type::DateTimeType;
pub use dictionary_type::DictionaryType;
pub use list_type::ListType;
pub use null_type::NullType;
pub use primitive_type::{

src/datatypes/src/types/dictionary_type.rs (new file, 91 lines)
@@ -0,0 +1,91 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use arrow::datatypes::DataType as ArrowDataType;
use serde::{Deserialize, Serialize};

use crate::data_type::{ConcreteDataType, DataType};
use crate::type_id::LogicalTypeId;
use crate::value::Value;
use crate::vectors::MutableVector;

/// Used to represent the Dictionary datatype.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct DictionaryType {
    // Use Box to avoid recursive dependency, as enum ConcreteDataType depends on DictionaryType.
    /// The type of Dictionary key.
    key_type: Box<ConcreteDataType>,
    /// The type of Dictionary value.
    value_type: Box<ConcreteDataType>,
}

impl Default for DictionaryType {
    fn default() -> Self {
        DictionaryType::new(
            ConcreteDataType::null_datatype(),
            ConcreteDataType::null_datatype(),
        )
    }
}

impl DictionaryType {
    /// Creates a new `DictionaryType` with the given `key_type` and `value_type`.
    pub fn new(key_type: ConcreteDataType, value_type: ConcreteDataType) -> Self {
        DictionaryType {
            key_type: Box::new(key_type),
            value_type: Box::new(value_type),
        }
    }

    /// Returns the key data type.
    #[inline]
    pub fn key_type(&self) -> &ConcreteDataType {
        &self.key_type
    }

    /// Returns the value data type.
    #[inline]
    pub fn value_type(&self) -> &ConcreteDataType {
        &self.value_type
    }
}

impl DataType for DictionaryType {
    fn name(&self) -> &str {
        "Dictionary"
    }

    fn logical_type_id(&self) -> LogicalTypeId {
        LogicalTypeId::Dictionary
    }

    fn default_value(&self) -> Value {
        unimplemented!()
    }

    fn as_arrow_type(&self) -> ArrowDataType {
        ArrowDataType::Dictionary(
            Box::new(self.key_type.as_arrow_type()),
            Box::new(self.value_type.as_arrow_type()),
        )
    }

    fn create_mutable_vector(&self, _capacity: usize) -> Box<dyn MutableVector> {
        unimplemented!()
    }

    fn is_timestamp_compatible(&self) -> bool {
        false
    }
}
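
[Editor's note: a minimal usage sketch, not part of the diff. It assumes the
`ConcreteDataType` constructors and `from_arrow_type` shown in the
data_type.rs hunks above; `int32_datatype()` / `string_datatype()` are
assumed to exist alongside `null_datatype()`.]

    use datatypes::prelude::ConcreteDataType;

    // Build a Dictionary<Int32, String> logical type ...
    let dict = ConcreteDataType::dictionary_datatype(
        ConcreteDataType::int32_datatype(),
        ConcreteDataType::string_datatype(),
    );
    // ... convert it to Arrow and back; the round trip should preserve the
    // type, per the TryFrom<&ArrowDataType> arm added above.
    let arrow_type = dict.as_arrow_type();
    assert_eq!(dict, ConcreteDataType::from_arrow_type(&arrow_type));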
@@ -273,6 +273,10 @@ fn to_null_value(output_type: &ConcreteDataType) -> ScalarValue {
        ConcreteDataType::List(_) => {
            ScalarValue::List(None, Box::new(new_item_field(output_type.as_arrow_type())))
        }
        ConcreteDataType::Dictionary(dict) => ScalarValue::Dictionary(
            Box::new(dict.key_type().as_arrow_type()),
            Box::new(to_null_value(dict.value_type())),
        ),
    }
}

@@ -513,6 +517,17 @@ impl Ord for ListValue {
    }
}

// TODO(ruihang): Implement this type
/// Dictionary value.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DictionaryValue {
    /// Inner values datatypes
    key_type: ConcreteDataType,
    value_type: ConcreteDataType,
}

impl Eq for DictionaryValue {}

impl TryFrom<ScalarValue> for Value {
    type Error = error::Error;

@@ -95,7 +95,7 @@ fn equal(lhs: &dyn Vector, rhs: &dyn Vector) -> bool {
        },
        List(_) => is_vector_eq!(ListVector, lhs, rhs),
        UInt8(_) | UInt16(_) | UInt32(_) | UInt64(_) | Int8(_) | Int16(_) | Int32(_) | Int64(_)
        | Float32(_) | Float64(_) => {
        | Float32(_) | Float64(_) | Dictionary(_) => {
            with_match_primitive_type_id!(lhs_type.logical_type_id(), |$T| {
                let lhs = lhs.as_any().downcast_ref::<PrimitiveVector<$T>>().unwrap();
                let rhs = rhs.as_any().downcast_ref::<PrimitiveVector<$T>>().unwrap();

@@ -53,4 +53,5 @@ futures = "0.3"
meta-srv = { path = "../meta-srv", features = ["mock"] }
strfmt = "0.2"
tempdir = "0.3"
toml = "0.5"
tower = "0.4"

@@ -253,7 +253,7 @@ pub enum Error {
        source: datanode::error::Error,
    },

    #[snafu(display("Missing meta_client_opts section in config"))]
    #[snafu(display("Missing meta_client_options section in config"))]
    MissingMetasrvOpts { backtrace: Backtrace },

    #[snafu(display("Failed to convert AlterExpr to AlterRequest, source: {}", source))]
@@ -348,6 +348,12 @@ pub enum Error {

    #[snafu(display("Illegal primary keys definition: {}", msg))]
    IllegalPrimaryKeysDef { msg: String, backtrace: Backtrace },

    #[snafu(display("Unrecognized table option: {}", source))]
    UnrecognizedTableOption {
        #[snafu(backtrace)]
        source: table::error::Error,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -427,6 +433,7 @@ impl ErrorExt for Error {
            Error::DeserializePartition { source, .. } | Error::FindTableRoute { source, .. } => {
                source.status_code()
            }
            Error::UnrecognizedTableOption { .. } => StatusCode::InvalidArguments,
        }
    }

@@ -22,14 +22,15 @@ use datanode::instance::sql::table_idents_to_full_name;
use datatypes::schema::ColumnSchema;
use session::context::QueryContextRef;
use snafu::{ensure, ResultExt};
use sql::ast::{ColumnDef, ColumnOption, TableConstraint};
use sql::ast::{ColumnDef, ColumnOption, SqlOption, TableConstraint, Value};
use sql::statements::column_def_to_schema;
use sql::statements::create::{CreateTable, TIME_INDEX};
use table::requests::TableOptions;

use crate::error::{
    self, BuildCreateExprOnInsertionSnafu, ColumnDataTypeSnafu,
    ConvertColumnDefaultConstraintSnafu, IllegalPrimaryKeysDefSnafu, InvalidSqlSnafu,
    ParseSqlSnafu, Result,
    ParseSqlSnafu, Result, UnrecognizedTableOptionSnafu,
};

pub type CreateExprFactoryRef = Arc<dyn CreateExprFactory + Send + Sync>;
@@ -82,6 +83,7 @@ pub(crate) fn create_to_expr(
        .context(error::ExternalSnafu)?;

    let time_index = find_time_index(&create.constraints)?;
    let table_options = HashMap::from(&stmt_options_to_table_options(&create.options)?);
    let expr = CreateTableExpr {
        catalog_name,
        schema_name,
@@ -91,8 +93,7 @@ pub(crate) fn create_to_expr(
        time_index,
        primary_keys: find_primary_keys(&create.columns, &create.constraints)?,
        create_if_not_exists: create.if_not_exists,
        // TODO(LFC): Fill in other table options.
        table_options: HashMap::from([("engine".to_string(), create.engine.clone())]),
        table_options,
        table_id: None,
        region_ids: vec![],
    };
@@ -218,3 +219,46 @@ fn columns_to_expr(
        })
        .collect()
}

// TODO(hl): This function is intentionally duplicated with that one in src/datanode/src/sql/create.rs:261
// since we are going to remove the statement parsing stuff from datanode.
// Refer: https://github.com/GreptimeTeam/greptimedb/issues/1010
fn stmt_options_to_table_options(opts: &[SqlOption]) -> error::Result<TableOptions> {
    let mut map = HashMap::with_capacity(opts.len());
    for SqlOption { name, value } in opts {
        let value_str = match value {
            Value::SingleQuotedString(s) | Value::DoubleQuotedString(s) => s.clone(),
            _ => value.to_string(),
        };
        map.insert(name.value.clone(), value_str);
    }
    let options = TableOptions::try_from(&map).context(UnrecognizedTableOptionSnafu)?;
    Ok(options)
}

#[cfg(test)]
mod tests {
    use session::context::QueryContext;
    use sql::dialect::GenericDialect;
    use sql::parser::ParserContext;
    use sql::statements::statement::Statement;

    use super::*;

    #[test]
    fn test_create_to_expr() {
        let sql = "CREATE TABLE monitor (host STRING,ts TIMESTAMP,TIME INDEX (ts),PRIMARY KEY(host)) ENGINE=mito WITH(regions=1, ttl='3days', write_buffer_size='1024KB');";
        let stmt = ParserContext::create_with_dialect(sql, &GenericDialect {})
            .unwrap()
            .pop()
            .unwrap();

        let Statement::CreateTable(create_table) = stmt else { unreachable!() };
        let expr = create_to_expr(&create_table, Arc::new(QueryContext::default())).unwrap();
        assert_eq!("3days", expr.table_options.get("ttl").unwrap());
        assert_eq!(
            "1.0MiB",
            expr.table_options.get("write_buffer_size").unwrap()
        );
    }
}

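[Editor's note: a minimal sketch of the option-map conversion the test above
exercises. `TableOptions::try_from(&HashMap<String, String>)` and the
`From<&TableOptions> for HashMap` direction both appear in this diff; the
exact set of accepted keys beyond `ttl` and `write_buffer_size` is an
assumption.]

    use std::collections::HashMap;
    use table::requests::TableOptions;

    let map = HashMap::from([
        ("ttl".to_string(), "3days".to_string()),
        ("write_buffer_size".to_string(), "1024KB".to_string()),
    ]);
    // Keys the table crate does not recognize make try_from fail; the
    // frontend wraps that failure as UnrecognizedTableOption.
    let options = TableOptions::try_from(&map).unwrap();
    // Converting back normalizes values, e.g. "1024KB" becomes "1.0MiB".
    let roundtrip: HashMap<String, String> = HashMap::from(&options);
    assert_eq!("1.0MiB", roundtrip.get("write_buffer_size").unwrap());
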
@@ -15,7 +15,7 @@
use std::sync::Arc;

use common_base::Plugins;
use meta_client::MetaClientOpts;
use meta_client::MetaClientOptions;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::Mode;
@@ -35,6 +35,7 @@ use crate::server::Services;
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct FrontendOptions {
    pub mode: Mode,
    pub http_options: Option<HttpOptions>,
    pub grpc_options: Option<GrpcOptions>,
    pub mysql_options: Option<MysqlOptions>,
@@ -43,13 +44,13 @@ pub struct FrontendOptions {
    pub influxdb_options: Option<InfluxdbOptions>,
    pub prometheus_options: Option<PrometheusOptions>,
    pub promql_options: Option<PromqlOptions>,
    pub mode: Mode,
    pub meta_client_opts: Option<MetaClientOpts>,
    pub meta_client_options: Option<MetaClientOptions>,
}

impl Default for FrontendOptions {
    fn default() -> Self {
        Self {
            mode: Mode::Standalone,
            http_options: Some(HttpOptions::default()),
            grpc_options: Some(GrpcOptions::default()),
            mysql_options: Some(MysqlOptions::default()),
@@ -58,8 +59,7 @@ impl Default for FrontendOptions {
            influxdb_options: Some(InfluxdbOptions::default()),
            prometheus_options: Some(PrometheusOptions::default()),
            promql_options: Some(PromqlOptions::default()),
            mode: Mode::Standalone,
            meta_client_opts: None,
            meta_client_options: None,
        }
    }
}
@@ -97,3 +97,15 @@ impl<T: FrontendInstance> Frontend<T> {
        Services::start(&self.opts, instance, self.plugins.clone()).await
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_toml() {
        let opts = FrontendOptions::default();
        let toml_string = toml::to_string(&opts).unwrap();
        let _parsed: FrontendOptions = toml::from_str(&toml_string).unwrap();
    }
}

@@ -42,7 +42,7 @@ use datanode::instance::InstanceRef as DnInstanceRef;
use datatypes::schema::Schema;
use distributed::DistInstance;
use meta_client::client::{MetaClient, MetaClientBuilder};
use meta_client::MetaClientOpts;
use meta_client::MetaClientOptions;
use partition::manager::PartitionRuleManager;
use partition::route::TableRoutes;
use query::parser::PromQuery;
@@ -148,7 +148,7 @@ impl Instance {

    async fn create_meta_client(opts: &FrontendOptions) -> Result<Arc<MetaClient>> {
        let metasrv_addr = &opts
            .meta_client_opts
            .meta_client_options
            .as_ref()
            .context(MissingMetasrvOptsSnafu)?
            .metasrv_addrs;
@@ -157,7 +157,7 @@ impl Instance {
            metasrv_addr
        );

        let meta_config = MetaClientOpts::default();
        let meta_config = MetaClientOptions::default();
        let channel_config = ChannelConfig::new()
            .timeout(Duration::from_millis(meta_config.timeout_millis))
            .connect_timeout(Duration::from_millis(meta_config.connect_timeout_millis))
@@ -396,7 +396,9 @@ impl Instance {
            | Statement::Insert(_)
            | Statement::Delete(_)
            | Statement::Alter(_)
            | Statement::DropTable(_) => self.sql_handler.do_statement_query(stmt, query_ctx).await,
            | Statement::DropTable(_)
            | Statement::Tql(_)
            | Statement::Copy(_) => self.sql_handler.do_statement_query(stmt, query_ctx).await,
            Statement::Use(db) => self.handle_use(db, query_ctx),
            Statement::ShowCreateTable(_) => NotSupportedSnafu {
                feat: format!("{stmt:?}"),
@@ -559,8 +561,8 @@ pub fn check_permission(
    }

    match stmt {
        // query and explain will be checked in QueryEngineState
        Statement::Query(_) | Statement::Explain(_) => {}
        // query, explain and tql will be checked in QueryEngineState
        Statement::Query(_) | Statement::Explain(_) | Statement::Tql(_) => {}
        // database ops won't be checked
        Statement::CreateDatabase(_) | Statement::ShowDatabases(_) | Statement::Use(_) => {}
        // show create table and alter are not supported yet
@@ -588,6 +590,9 @@ pub fn check_permission(
        Statement::Delete(delete) => {
            validate_param(delete.table_name(), query_ctx)?;
        }
        Statement::Copy(stmd) => {
            validate_param(stmd.table_name(), query_ctx)?;
        }
    }
    Ok(())
}

@@ -53,6 +53,7 @@ use sql::statements::create::Partitions;
use sql::statements::sql_value_to_value;
use sql::statements::statement::Statement;
use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType};
use table::requests::TableOptions;
use table::table::AlterContext;

use crate::catalog::FrontendCatalogManager;
@@ -62,7 +63,7 @@ use crate::error::{
    ColumnDataTypeSnafu, DeserializePartitionSnafu, ParseSqlSnafu, PrimaryKeyNotFoundSnafu,
    RequestDatanodeSnafu, RequestMetaSnafu, Result, SchemaExistsSnafu, SchemaNotFoundSnafu,
    StartMetaClientSnafu, TableAlreadyExistSnafu, TableNotFoundSnafu, TableSnafu,
    ToTableInsertRequestSnafu,
    ToTableInsertRequestSnafu, UnrecognizedTableOptionSnafu,
};
use crate::expr_factory;
use crate::instance::parse_stmt;
@@ -185,7 +186,7 @@ impl DistInstance {

        for datanode in table_route.find_leaders() {
            let client = self.datanode_clients.get_client(&datanode).await;
            let client = Database::with_client(client);
            let client = Database::new(&table_name.catalog_name, &table_name.schema_name, client);

            let regions = table_route.find_leader_regions(&datanode);
            let mut create_expr_for_region = create_table.clone();
@@ -605,7 +606,8 @@ fn create_table_info(create_table: &CreateTableExpr) -> Result<RawTableInfo> {
        next_column_id: column_schemas.len() as u32,
        region_numbers: vec![],
        engine_options: HashMap::new(),
        options: HashMap::new(),
        options: TableOptions::try_from(&create_table.table_options)
            .context(UnrecognizedTableOptionSnafu)?,
        created_on: DateTime::default(),
    };

@@ -66,6 +66,7 @@ impl Services {

        let grpc_server = GrpcServer::new(
            ServerGrpcQueryHandlerAdaptor::arc(instance.clone()),
            user_provider.clone(),
            grpc_runtime,
        );

@@ -258,7 +258,7 @@ impl DistTable {
        );
        for datanode in leaders {
            let client = self.datanode_clients.get_client(&datanode).await;
            let db = Database::with_client(client);
            let db = Database::new(&expr.catalog_name, &expr.schema_name, client);
            debug!("Sending {:?} to {:?}", expr, db);
            let result = db
                .alter(expr.clone())

@@ -114,6 +114,7 @@ pub(crate) async fn create_datanode_client(
    // https://github.com/hyperium/tonic/blob/master/examples/src/mock/mock.rs
    let datanode_service = GrpcServer::new(
        ServerGrpcQueryHandlerAdaptor::arc(datanode_instance),
        None,
        runtime,
    )
    .create_service();

@@ -26,6 +26,7 @@ use meta_client::rpc::{
    PutRequest, RangeRequest, TableName,
};
use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType};
use table::requests::TableOptions;
use tracing::{event, subscriber, Level};
use tracing_subscriber::FmtSubscriber;

@@ -177,7 +178,7 @@ fn new_table_info() -> RawTableInfo {
        next_column_id: 0,
        region_numbers: vec![],
        engine_options: HashMap::new(),
        options: HashMap::new(),
        options: TableOptions::default(),
        created_on: DateTime::default(),
    },
    table_type: TableType::Base,

@@ -339,6 +339,7 @@ mod tests {
    use meta_srv::selector::{Namespace, Selector};
    use meta_srv::Result as MetaResult;
    use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType};
    use table::requests::TableOptions;

    use super::*;
    use crate::mocks;
@@ -469,7 +470,7 @@ mod tests {
        next_column_id: 0,
        region_numbers: vec![],
        engine_options: HashMap::new(),
        options: HashMap::new(),
        options: TableOptions::default(),
        created_on: DateTime::default(),
    },
    table_type: TableType::Base,

@@ -22,14 +22,14 @@ pub mod rpc;

// Options for meta client in datanode instance.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MetaClientOpts {
pub struct MetaClientOptions {
    pub metasrv_addrs: Vec<String>,
    pub timeout_millis: u64,
    pub connect_timeout_millis: u64,
    pub tcp_nodelay: bool,
}

impl Default for MetaClientOpts {
impl Default for MetaClientOptions {
    fn default() -> Self {
        Self {
            metasrv_addrs: vec!["127.0.0.1:3002".to_string()],

@@ -312,6 +312,7 @@ mod tests {
    use datatypes::prelude::ConcreteDataType;
    use datatypes::schema::{ColumnSchema, RawSchema};
    use table::metadata::{RawTableMeta, TableIdent, TableType};
    use table::requests::TableOptions;

    use super::*;

@@ -345,7 +346,7 @@ mod tests {
        next_column_id: 0,
        region_numbers: vec![],
        engine_options: HashMap::new(),
        options: HashMap::new(),
        options: TableOptions::default(),
        created_on: DateTime::default(),
    },
    table_type: TableType::Base,

@@ -35,7 +35,7 @@ use crate::selector::load_based::LoadBasedSelector;
use crate::selector::SelectorType;
use crate::service::admin;
use crate::service::store::etcd::EtcdStore;
use crate::service::store::kv::ResetableKvStoreRef;
use crate::service::store::kv::ResettableKvStoreRef;
use crate::service::store::memory::MemStore;
use crate::{error, Result};

@@ -90,7 +90,7 @@ pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
        )
    };

    let in_memory = Arc::new(MemStore::default()) as ResetableKvStoreRef;
    let in_memory = Arc::new(MemStore::default()) as ResettableKvStoreRef;

    let meta_peer_client = MetaPeerClientBuilder::default()
        .election(election.clone())

@@ -28,13 +28,13 @@ use crate::error::{match_for_io_error, Result};
use crate::keys::{StatKey, StatValue, DN_STAT_PREFIX};
use crate::metasrv::ElectionRef;
use crate::service::store::ext::KvStoreExt;
use crate::service::store::kv::ResetableKvStoreRef;
use crate::service::store::kv::ResettableKvStoreRef;
use crate::{error, util};

#[derive(Builder, Clone)]
pub struct MetaPeerClient {
    election: Option<ElectionRef>,
    in_memory: ResetableKvStoreRef,
    in_memory: ResettableKvStoreRef,
    #[builder(default = "ChannelManager::default()")]
    channel_manager: ChannelManager,
    #[builder(default = "3")]

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::string::FromUtf8Error;

use common_error::prelude::*;
use tonic::codegen::http;
use tonic::{Code, Status};
@@ -259,6 +261,15 @@ pub enum Error {

    #[snafu(display("Distributed lock is not configured"))]
    LockNotConfig { backtrace: Backtrace },

    #[snafu(display("Invalid utf-8 value, source: {:?}", source))]
    InvalidUtf8Value {
        source: FromUtf8Error,
        backtrace: Backtrace,
    },

    #[snafu(display("Missing required parameter, param: {:?}", param))]
    MissingRequiredParameter { param: String },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -303,6 +314,7 @@ impl ErrorExt for Error {
            | Error::ExceededRetryLimit { .. }
            | Error::StartGrpc { .. } => StatusCode::Internal,
            Error::EmptyKey { .. }
            | Error::MissingRequiredParameter { .. }
            | Error::EmptyTableName { .. }
            | Error::InvalidLeaseKey { .. }
            | Error::InvalidStatKey { .. }
@@ -319,6 +331,7 @@ impl ErrorExt for Error {
            | Error::MoveValue { .. }
            | Error::InvalidKvsLength { .. }
            | Error::InvalidTxnResult { .. }
            | Error::InvalidUtf8Value { .. }
            | Error::Unexpected { .. } => StatusCode::Unexpected,
            Error::TableNotFound { .. } => StatusCode::TableNotFound,
            Error::InvalidCatalogValue { source, .. } => source.status_code(),

@@ -27,7 +27,7 @@ use crate::handler::HeartbeatHandlerGroup;
use crate::lock::DistLockRef;
use crate::selector::{Selector, SelectorType};
use crate::sequence::SequenceRef;
use crate::service::store::kv::{KvStoreRef, ResetableKvStoreRef};
use crate::service::store::kv::{KvStoreRef, ResettableKvStoreRef};

pub const TABLE_ID_SEQ: &str = "table_id";

@@ -59,7 +59,7 @@ impl Default for MetaSrvOptions {
pub struct Context {
    pub datanode_lease_secs: i64,
    pub server_addr: String,
    pub in_memory: ResetableKvStoreRef,
    pub in_memory: ResettableKvStoreRef,
    pub kv_store: KvStoreRef,
    pub election: Option<ElectionRef>,
    pub skip_all: Arc<AtomicBool>,
@@ -93,7 +93,7 @@ pub struct MetaSrv {
    options: MetaSrvOptions,
    // It is only valid at the leader node and is used to temporarily
    // store some data that will not be persisted.
    in_memory: ResetableKvStoreRef,
    in_memory: ResettableKvStoreRef,
    kv_store: KvStoreRef,
    table_id_sequence: SequenceRef,
    selector: SelectorRef,
@@ -142,7 +142,7 @@ impl MetaSrv {
    }

    #[inline]
    pub fn in_memory(&self) -> ResetableKvStoreRef {
    pub fn in_memory(&self) -> ResettableKvStoreRef {
        self.in_memory.clone()
    }

@@ -24,14 +24,14 @@ use crate::lock::DistLockRef;
use crate::metasrv::{ElectionRef, MetaSrv, MetaSrvOptions, SelectorRef, TABLE_ID_SEQ};
use crate::selector::lease_based::LeaseBasedSelector;
use crate::sequence::Sequence;
use crate::service::store::kv::{KvStoreRef, ResetableKvStoreRef};
use crate::service::store::kv::{KvStoreRef, ResettableKvStoreRef};
use crate::service::store::memory::MemStore;

// TODO(fys): try use derive_builder macro
pub struct MetaSrvBuilder {
    options: Option<MetaSrvOptions>,
    kv_store: Option<KvStoreRef>,
    in_memory: Option<ResetableKvStoreRef>,
    in_memory: Option<ResettableKvStoreRef>,
    selector: Option<SelectorRef>,
    handler_group: Option<HeartbeatHandlerGroup>,
    election: Option<ElectionRef>,
@@ -63,7 +63,7 @@ impl MetaSrvBuilder {
        self
    }

    pub fn in_memory(mut self, in_memory: ResetableKvStoreRef) -> Self {
    pub fn in_memory(mut self, in_memory: ResettableKvStoreRef) -> Self {
        self.in_memory = Some(in_memory);
        self
    }

@@ -14,6 +14,8 @@

mod health;
mod heartbeat;
mod leader;
mod meta;

use std::collections::HashMap;
use std::convert::Infallible;
@@ -36,6 +38,41 @@ pub fn make_admin_service(meta_srv: MetaSrv) -> Admin {
        },
    );

    let router = router.route(
        "/catalogs",
        meta::CatalogsHandler {
            kv_store: meta_srv.kv_store(),
        },
    );

    let router = router.route(
        "/schemas",
        meta::SchemasHandler {
            kv_store: meta_srv.kv_store(),
        },
    );

    let router = router.route(
        "/tables",
        meta::TablesHandler {
            kv_store: meta_srv.kv_store(),
        },
    );

    let router = router.route(
        "/table",
        meta::TableHandler {
            kv_store: meta_srv.kv_store(),
        },
    );

    let router = router.route(
        "/leader",
        leader::LeaderHandler {
            election: meta_srv.election(),
        },
    );

    let router = Router::nest("/admin", router);

    Admin::new(router)

@@ -29,14 +29,22 @@ pub struct HeartBeatHandler {

#[async_trait::async_trait]
impl HttpHandler for HeartBeatHandler {
    async fn handle(&self, _: &str, _: &HashMap<String, String>) -> Result<http::Response<String>> {
    async fn handle(
        &self,
        _: &str,
        params: &HashMap<String, String>,
    ) -> Result<http::Response<String>> {
        let meta_peer_client = self
            .meta_peer_client
            .as_ref()
            .context(error::NoMetaPeerClientSnafu)?;

        let stat_kvs = meta_peer_client.get_all_dn_stat_kvs().await?;
        let stat_vals: Vec<StatValue> = stat_kvs.into_values().collect();
        let mut stat_vals: Vec<StatValue> = stat_kvs.into_values().collect();

        if let Some(addr) = params.get("addr") {
            stat_vals = filter_by_addr(stat_vals, addr);
        }
        let result = StatValues { stat_vals }.try_into()?;

        http::Response::builder()
@@ -61,3 +69,70 @@ impl TryFrom<StatValues> for String {
        })
    }
}

fn filter_by_addr(stat_vals: Vec<StatValue>, addr: &str) -> Vec<StatValue> {
    stat_vals
        .into_iter()
        .filter(|stat_val| stat_val.stats.iter().any(|stat| stat.addr == addr))
        .collect()
}

#[cfg(test)]
mod tests {
    use crate::handler::node_stat::Stat;
    use crate::keys::StatValue;
    use crate::service::admin::heartbeat::filter_by_addr;

    #[tokio::test]
    async fn test_filter_by_addr() {
        let stat_value1 = StatValue {
            stats: vec![
                Stat {
                    addr: "127.0.0.1:3001".to_string(),
                    timestamp_millis: 1,
                    ..Default::default()
                },
                Stat {
                    addr: "127.0.0.1:3001".to_string(),
                    timestamp_millis: 2,
                    ..Default::default()
                },
            ],
        };

        let stat_value2 = StatValue {
            stats: vec![
                Stat {
                    addr: "127.0.0.1:3002".to_string(),
                    timestamp_millis: 3,
                    ..Default::default()
                },
                Stat {
                    addr: "127.0.0.1:3002".to_string(),
                    timestamp_millis: 4,
                    ..Default::default()
                },
                Stat {
                    addr: "127.0.0.1:3002".to_string(),
                    timestamp_millis: 5,
                    ..Default::default()
                },
            ],
        };

        let mut stat_vals = vec![stat_value1, stat_value2];
        stat_vals = filter_by_addr(stat_vals, "127.0.0.1:3002");
        assert_eq!(stat_vals.len(), 1);
        assert_eq!(stat_vals.get(0).unwrap().stats.len(), 3);
        assert_eq!(
            stat_vals
                .get(0)
                .unwrap()
                .stats
                .get(0)
                .unwrap()
                .timestamp_millis,
            3
        );
    }
}

src/meta-srv/src/service/admin/leader.rs (new file, 42 lines)
@@ -0,0 +1,42 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use snafu::ResultExt;
use tonic::codegen::http;

use crate::error::{self, Result};
use crate::metasrv::ElectionRef;
use crate::service::admin::HttpHandler;

pub struct LeaderHandler {
    pub election: Option<ElectionRef>,
}

#[async_trait::async_trait]
impl HttpHandler for LeaderHandler {
    async fn handle(&self, _: &str, _: &HashMap<String, String>) -> Result<http::Response<String>> {
        if let Some(election) = &self.election {
            let leader_addr = election.leader().await?.0;
            return http::Response::builder()
                .status(http::StatusCode::OK)
                .body(leader_addr)
                .context(error::InvalidHttpBodySnafu);
        }
        http::Response::builder()
            .status(http::StatusCode::OK)
            .body("election info is None".to_string())
            .context(error::InvalidHttpBodySnafu)
    }
}
src/meta-srv/src/service/admin/meta.rs (new file, 197 lines)
@@ -0,0 +1,197 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use api::v1::meta::{RangeRequest, RangeResponse};
use catalog::helper::{CATALOG_KEY_PREFIX, SCHEMA_KEY_PREFIX, TABLE_GLOBAL_KEY_PREFIX};
use snafu::{OptionExt, ResultExt};
use tonic::codegen::http;

use crate::error::Result;
use crate::service::admin::HttpHandler;
use crate::service::store::ext::KvStoreExt;
use crate::service::store::kv::KvStoreRef;
use crate::{error, util};

pub struct CatalogsHandler {
    pub kv_store: KvStoreRef,
}

pub struct SchemasHandler {
    pub kv_store: KvStoreRef,
}

pub struct TablesHandler {
    pub kv_store: KvStoreRef,
}

pub struct TableHandler {
    pub kv_store: KvStoreRef,
}

#[async_trait::async_trait]
impl HttpHandler for CatalogsHandler {
    async fn handle(&self, _: &str, _: &HashMap<String, String>) -> Result<http::Response<String>> {
        get_http_response_by_prefix(String::from(CATALOG_KEY_PREFIX), &self.kv_store).await
    }
}

#[async_trait::async_trait]
impl HttpHandler for SchemasHandler {
    async fn handle(
        &self,
        _: &str,
        params: &HashMap<String, String>,
    ) -> Result<http::Response<String>> {
        let catalog = params
            .get("catalog_name")
            .context(error::MissingRequiredParameterSnafu {
                param: "catalog_name",
            })?;
        let prefix = format!("{SCHEMA_KEY_PREFIX}-{catalog}",);
        get_http_response_by_prefix(prefix, &self.kv_store).await
    }
}

#[async_trait::async_trait]
impl HttpHandler for TablesHandler {
    async fn handle(
        &self,
        _: &str,
        params: &HashMap<String, String>,
    ) -> Result<http::Response<String>> {
        let catalog = params
            .get("catalog_name")
            .context(error::MissingRequiredParameterSnafu {
                param: "catalog_name",
            })?;

        let schema = params
            .get("schema_name")
            .context(error::MissingRequiredParameterSnafu {
                param: "schema_name",
            })?;
        let prefix = format!("{TABLE_GLOBAL_KEY_PREFIX}-{catalog}-{schema}",);
        get_http_response_by_prefix(prefix, &self.kv_store).await
    }
}

#[async_trait::async_trait]
impl HttpHandler for TableHandler {
    async fn handle(
        &self,
        _: &str,
        params: &HashMap<String, String>,
    ) -> Result<http::Response<String>> {
        let table_name = params
            .get("full_table_name")
            .map(|full_table_name| full_table_name.replace('.', "-"))
            .context(error::MissingRequiredParameterSnafu {
                param: "full_table_name",
            })?;
        let table_key = format!("{TABLE_GLOBAL_KEY_PREFIX}-{table_name}");

        let response = self.kv_store.get(table_key.into_bytes()).await?;
        let mut value: String = "Not found result".to_string();
        if let Some(key_value) = response {
            value = String::from_utf8(key_value.value).context(error::InvalidUtf8ValueSnafu)?;
        }
        http::Response::builder()
            .status(http::StatusCode::OK)
            .body(value)
            .context(error::InvalidHttpBodySnafu)
    }
}

/// Get kv_store's key list with http response format by prefix key
async fn get_http_response_by_prefix(
    key_prefix: String,
    kv_store: &KvStoreRef,
) -> Result<http::Response<String>> {
    let keys = get_keys_by_prefix(key_prefix, kv_store).await?;
    let body = serde_json::to_string(&keys).context(error::SerializeToJsonSnafu {
        input: format!("{keys:?}"),
    })?;

    http::Response::builder()
        .status(http::StatusCode::OK)
        .body(body)
        .context(error::InvalidHttpBodySnafu)
}

/// Get kv_store's key list by prefix key
async fn get_keys_by_prefix(key_prefix: String, kv_store: &KvStoreRef) -> Result<Vec<String>> {
    let key_prefix_u8 = key_prefix.clone().into_bytes();
    let range_end = util::get_prefix_end_key(&key_prefix_u8);
    let req = RangeRequest {
        key: key_prefix_u8,
        range_end,
        ..Default::default()
    };

    let response: RangeResponse = kv_store.range(req).await?;

    let kvs = response.kvs;
    let mut values = Vec::with_capacity(kvs.len());
    for kv in kvs {
        let value = String::from_utf8(kv.key).context(error::InvalidUtf8ValueSnafu)?;
        let split_list = value.split(&key_prefix).collect::<Vec<&str>>();
        if let Some(v) = split_list.get(1) {
            values.push(v.to_string());
        }
    }
    Ok(values)
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use api::v1::meta::PutRequest;

    use crate::service::admin::meta::get_keys_by_prefix;
    use crate::service::store::kv::KvStoreRef;
    use crate::service::store::memory::MemStore;

    #[tokio::test]
    async fn test_get_list_by_prefix() {
        let in_mem = Arc::new(MemStore::new()) as KvStoreRef;

        in_mem
            .put(PutRequest {
                key: "test_key1".as_bytes().to_vec(),
                value: "test_val1".as_bytes().to_vec(),
                ..Default::default()
            })
            .await
            .unwrap();

        in_mem
            .put(PutRequest {
                key: "test_key2".as_bytes().to_vec(),
                value: "test_val2".as_bytes().to_vec(),
                ..Default::default()
            })
            .await
            .unwrap();

        let keys = get_keys_by_prefix(String::from("test_key"), &in_mem)
            .await
            .unwrap();

        assert_eq!("1", keys[0]);
        assert_eq!("2", keys[1]);
    }
}
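
[Editor's note: an illustrative sketch, not the actual util::get_prefix_end_key
implementation. `get_keys_by_prefix` above turns a key prefix into an
etcd-style [key, range_end) scan; the range end is the smallest key strictly
greater than every key sharing the prefix.]

    /// Computes the exclusive upper bound for a prefix scan: increment the
    /// rightmost byte below 0xFF and truncate everything after it. An
    /// all-0xFF prefix has no finite bound; by etcd convention the one-byte
    /// key b"\0" then means "scan to the end of the key space".
    fn prefix_end_key(prefix: &[u8]) -> Vec<u8> {
        let mut end = prefix.to_vec();
        while let Some(last) = end.pop() {
            if last < 0xFF {
                end.push(last + 1);
                return end;
            }
        }
        vec![0]
    }

    assert_eq!(prefix_end_key(b"test_key"), b"test_kez".to_vec());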
@@ -18,11 +18,11 @@ pub mod kv;
pub mod memory;

use api::v1::meta::{
    store_server, BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse,
    DeleteRangeRequest, DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest,
    PutResponse, RangeRequest, RangeResponse,
    store_server, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse,
    CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest, DeleteRangeResponse,
    MoveValueRequest, MoveValueResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
};
use tonic::{Request, Response};
use tonic::{Request, Response, Status};

use crate::metasrv::MetaSrv;
use crate::service::GrpcResult;
@@ -43,6 +43,14 @@ impl store_server::Store for MetaSrv {
        Ok(Response::new(res))
    }

    async fn batch_get(
        &self,
        _request: Request<BatchGetRequest>,
    ) -> Result<Response<BatchGetResponse>, Status> {
        // TODO(fys): please fix this
        unimplemented!()
    }

    async fn batch_put(&self, req: Request<BatchPutRequest>) -> GrpcResult<BatchPutResponse> {
        let req = req.into_inner();
        let res = self.kv_store().batch_put(req).await?;

@@ -23,7 +23,7 @@ use api::v1::meta::{
use crate::error::Result;

pub type KvStoreRef = Arc<dyn KvStore>;
pub type ResetableKvStoreRef = Arc<dyn ResetableKvStore>;
pub type ResettableKvStoreRef = Arc<dyn ResettableKvStore>;

#[async_trait::async_trait]
pub trait KvStore: Send + Sync {
@@ -40,6 +40,6 @@ pub trait KvStore: Send + Sync {
    async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse>;
}

pub trait ResetableKvStore: KvStore {
pub trait ResettableKvStore: KvStore {
    fn reset(&self);
}

@@ -24,7 +24,7 @@ use api::v1::meta::{
use parking_lot::RwLock;

use crate::error::Result;
use crate::service::store::kv::{KvStore, ResetableKvStore};
use crate::service::store::kv::{KvStore, ResettableKvStore};

pub struct MemStore {
    inner: RwLock<BTreeMap<Vec<u8>, Vec<u8>>>,
@@ -44,7 +44,7 @@ impl MemStore {
    }
}

impl ResetableKvStore for MemStore {
impl ResettableKvStore for MemStore {
    fn reset(&self) {
        self.inner.write().clear();
    }

@@ -16,6 +16,7 @@ async-trait = "0.1"
chrono.workspace = true
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-procedure = { path = "../common/procedure" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry" }

@@ -12,14 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod procedure;

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

use async_trait::async_trait;
use common_error::ext::BoxedError;
use common_procedure::{BoxedProcedure, ProcedureManager};
use common_telemetry::tracing::log::info;
use common_telemetry::{debug, logging};
use datatypes::schema::{Schema, SchemaRef};
use datatypes::schema::Schema;
use object_store::ObjectStore;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::{
@@ -27,7 +30,7 @@ use store_api::storage::{
    CreateOptions, EngineContext as StorageEngineContext, OpenOptions, Region,
    RegionDescriptorBuilder, RegionId, RowKeyDescriptor, RowKeyDescriptorBuilder, StorageEngine,
};
use table::engine::{EngineContext, TableEngine, TableReference};
use table::engine::{EngineContext, TableEngine, TableEngineProcedure, TableReference};
use table::error::TableOperationSnafu;
use table::metadata::{
    TableId, TableInfo, TableInfoBuilder, TableMetaBuilder, TableType, TableVersion,
@@ -40,6 +43,7 @@ use table::{error as table_error, Result as TableResult, Table};
use tokio::sync::Mutex;

use crate::config::EngineConfig;
use crate::engine::procedure::CreateMitoTable;
use crate::error::{
    self, BuildColumnDescriptorSnafu, BuildColumnFamilyDescriptorSnafu, BuildRegionDescriptorSnafu,
    BuildRowKeyDescriptorSnafu, InvalidPrimaryKeySnafu, InvalidRawSchemaSnafu,
@@ -83,6 +87,14 @@ impl<S: StorageEngine> MitoEngine<S> {
            inner: Arc::new(MitoEngineInner::new(config, storage_engine, object_store)),
        }
    }

    /// Register all procedure loaders to the procedure manager.
    ///
    /// # Panics
    /// Panics on error.
    pub fn register_procedure_loaders(&self, procedure_manager: &dyn ProcedureManager) {
        procedure::register_procedure_loaders(self.inner.clone(), procedure_manager);
    }
}

#[async_trait]
@@ -152,7 +164,22 @@ impl<S: StorageEngine> TableEngine for MitoEngine<S> {
    }
}

struct MitoEngineInner<S: StorageEngine> {
impl<S: StorageEngine> TableEngineProcedure for MitoEngine<S> {
    fn create_table_procedure(
        &self,
        _ctx: &EngineContext,
        request: CreateTableRequest,
    ) -> TableResult<BoxedProcedure> {
        validate_create_table_request(&request)
            .map_err(BoxedError::new)
            .context(table_error::TableOperationSnafu)?;

        let procedure = Box::new(CreateMitoTable::new(request, self.inner.clone()));
        Ok(procedure)
    }
}

pub(crate) struct MitoEngineInner<S: StorageEngine> {
    /// All tables opened by the engine. Map key is formatted [TableReference].
    ///
    /// Writing to `tables` should also hold the `table_mutex`.
@@ -167,7 +194,7 @@ struct MitoEngineInner<S: StorageEngine> {
fn build_row_key_desc(
    mut column_id: ColumnId,
    table_name: &str,
    table_schema: &SchemaRef,
    table_schema: &Schema,
    primary_key_indices: &Vec<usize>,
) -> Result<(ColumnId, RowKeyDescriptor)> {
    let ts_column_schema = table_schema
@@ -231,7 +258,7 @@ fn build_row_key_desc(
fn build_column_family(
    mut column_id: ColumnId,
    table_name: &str,
    table_schema: &SchemaRef,
    table_schema: &Schema,
    primary_key_indices: &[usize],
) -> Result<(ColumnId, ColumnFamilyDescriptor)> {
    let mut builder = ColumnFamilyDescriptorBuilder::default();
@@ -366,6 +393,11 @@ impl<S: StorageEngine> MitoEngineInner<S> {
        })?;
        let opts = CreateOptions {
            parent_dir: table_dir.clone(),
            write_buffer_size: request
                .table_options
                .write_buffer_size
                .map(|size| size.0 as usize),
            ttl: request.table_options.ttl,
        };

        let region = self
@@ -383,6 +415,7 @@ impl<S: StorageEngine> MitoEngineInner<S> {
            .engine(MITO_ENGINE)
            .next_column_id(next_column_id)
            .primary_key_indices(request.primary_key_indices.clone())
            .options(request.table_options)
            .region_numbers(request.region_numbers)
            .build()
            .context(error::BuildTableMetaSnafu { table_name })?;
@@ -448,13 +481,21 @@ impl<S: StorageEngine> MitoEngineInner<S> {
        let table_id = request.table_id;
        let engine_ctx = StorageEngineContext::default();
        let table_dir = table_dir(catalog_name, schema_name, table_id);
        let opts = OpenOptions {
            parent_dir: table_dir.to_string(),
        };

        let Some((manifest, table_info)) = self
            .recover_table_manifest_and_info(table_name, &table_dir)
            .await? else { return Ok(None) };
            .await.map_err(BoxedError::new)
            .context(TableOperationSnafu)? else { return Ok(None) };

        let opts = OpenOptions {
            parent_dir: table_dir.to_string(),
            write_buffer_size: table_info
                .meta
                .options
                .write_buffer_size
                .map(|s| s.0 as usize),
            ttl: table_info.meta.options.ttl,
        };

        debug!(
            "Opening table {}, table info recovered: {:?}",
@@ -500,16 +541,14 @@ impl<S: StorageEngine> MitoEngineInner<S> {
        &self,
        table_name: &str,
        table_dir: &str,
    ) -> TableResult<Option<(TableManifest, TableInfo)>> {
    ) -> Result<Option<(TableManifest, TableInfo)>> {
        let manifest = MitoTable::<<S as StorageEngine>::Region>::build_manifest(
            table_dir,
            self.object_store.clone(),
        );
        let Some(table_info) =
            MitoTable::<<S as StorageEngine>::Region>::recover_table_info(table_name, &manifest)
                .await
                .map_err(BoxedError::new)
                .context(TableOperationSnafu)? else { return Ok(None) };
                .await? else { return Ok(None) };

        Ok(Some((manifest, table_info)))
    }
@@ -612,7 +651,7 @@ mod tests {
    use storage::EngineImpl;
    use store_api::manifest::Manifest;
    use store_api::storage::ReadContext;
    use table::requests::{AddColumnRequest, AlterKind, DeleteRequest};
    use table::requests::{AddColumnRequest, AlterKind, DeleteRequest, TableOptions};
    use tempdir::TempDir;

    use super::*;
@@ -664,7 +703,7 @@ mod tests {
|
||||
schema,
|
||||
create_if_not_exists: true,
|
||||
primary_key_indices: Vec::default(),
|
||||
table_options: HashMap::new(),
|
||||
table_options: TableOptions::default(),
|
||||
region_numbers: vec![0],
|
||||
},
|
||||
)
|
||||
@@ -778,7 +817,7 @@ mod tests {
|
||||
create_if_not_exists: true,
|
||||
// put ts into primary keys
|
||||
primary_key_indices: vec![0, 1],
|
||||
table_options: HashMap::new(),
|
||||
table_options: TableOptions::default(),
|
||||
region_numbers: vec![0],
|
||||
};
|
||||
|
||||
@@ -939,7 +978,7 @@ mod tests {
|
||||
create_if_not_exists: true,
|
||||
desc: None,
|
||||
primary_key_indices: Vec::default(),
|
||||
table_options: HashMap::new(),
|
||||
table_options: TableOptions::default(),
|
||||
region_numbers: vec![0],
|
||||
};
|
||||
|
||||
@@ -956,7 +995,7 @@ mod tests {
|
||||
create_if_not_exists: false,
|
||||
desc: None,
|
||||
primary_key_indices: Vec::default(),
|
||||
table_options: HashMap::new(),
|
||||
table_options: TableOptions::default(),
|
||||
region_numbers: vec![0],
|
||||
};
|
||||
|
||||
@@ -1165,7 +1204,7 @@ mod tests {
|
||||
region_numbers: vec![0],
|
||||
primary_key_indices: vec![0],
|
||||
create_if_not_exists: true,
|
||||
table_options: HashMap::new(),
|
||||
table_options: TableOptions::default(),
|
||||
};
|
||||
table_engine
|
||||
.create_table(&ctx, req)
|
||||
@@ -1248,7 +1287,7 @@ mod tests {
|
||||
create_if_not_exists: true,
|
||||
desc: None,
|
||||
primary_key_indices: Vec::default(),
|
||||
table_options: HashMap::new(),
|
||||
table_options: TableOptions::default(),
|
||||
region_numbers: vec![0],
|
||||
};
|
||||
|
||||
@@ -1281,7 +1320,7 @@ mod tests {
|
||||
create_if_not_exists: false,
|
||||
desc: None,
|
||||
primary_key_indices: Vec::default(),
|
||||
table_options: HashMap::new(),
|
||||
table_options: TableOptions::default(),
|
||||
region_numbers: vec![0],
|
||||
};
|
||||
table_engine.create_table(&ctx, request).await.unwrap();
|
||||
|
||||
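Note: the recurring change from `table_options: HashMap::new()` to `table_options: TableOptions::default()` in the tests above exists because table options are now a typed struct whose fields feed the storage layer's `CreateOptions` (see the `@@ -366,6 +393,11` hunk). Below is a minimal sketch of that mapping, not the real types: the `ReadableSize(u64)` wrapper and the exact field set are assumptions inferred from this diff alone.

use std::time::Duration;

// Stand-ins for the real `table::requests::TableOptions` and the storage
// layer's `CreateOptions`; field names mirror the diff above, the
// `ReadableSize` wrapper is assumed from the `.map(|size| size.0 as usize)` call.
struct ReadableSize(u64);

#[derive(Default)]
struct TableOptions {
    write_buffer_size: Option<ReadableSize>,
    ttl: Option<Duration>,
}

struct CreateOptions {
    parent_dir: String,
    write_buffer_size: Option<usize>,
    ttl: Option<Duration>,
}

// Mirrors the mapping in the `@@ -366` hunk: unwrap the size wrapper into
// bytes and pass the TTL through unchanged.
fn to_create_options(table_dir: &str, opts: &TableOptions) -> CreateOptions {
    CreateOptions {
        parent_dir: table_dir.to_string(),
        write_buffer_size: opts.write_buffer_size.as_ref().map(|size| size.0 as usize),
        ttl: opts.ttl,
    }
}

fn main() {
    let opts = TableOptions {
        write_buffer_size: Some(ReadableSize(32 * 1024 * 1024)),
        ttl: Some(Duration::from_secs(24 * 60 * 60)),
    };
    // Hypothetical table directory, for illustration only.
    let create = to_create_options("data/catalog/schema/42/", &opts);
    assert_eq!(create.write_buffer_size, Some(32 * 1024 * 1024));
    assert_eq!(create.ttl, Some(Duration::from_secs(24 * 60 * 60)));
}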
96
src/mito/src/engine/procedure.rs
Normal file
@@ -0,0 +1,96 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod create;

use std::sync::Arc;

use common_procedure::ProcedureManager;
pub(crate) use create::CreateMitoTable;
use store_api::storage::StorageEngine;

use crate::engine::MitoEngineInner;

/// Register all procedure loaders to the procedure manager.
///
/// # Panics
/// Panics on error.
pub(crate) fn register_procedure_loaders<S: StorageEngine>(
    engine_inner: Arc<MitoEngineInner<S>>,
    procedure_manager: &dyn ProcedureManager,
) {
    // The procedure names are expected to be unique, so we just panic on error.
    CreateMitoTable::register_loader(engine_inner, procedure_manager);
}

#[cfg(test)]
mod procedure_test_util {
    use async_trait::async_trait;
    use common_procedure::{
        BoxedProcedure, Context, ContextProvider, ProcedureId, ProcedureState, Result, Status,
    };
    use log_store::NoopLogStore;
    use storage::compaction::noop::NoopCompactionScheduler;
    use storage::config::EngineConfig as StorageEngineConfig;
    use storage::EngineImpl;
    use tempdir::TempDir;

    use super::*;
    use crate::engine::{EngineConfig, MitoEngine};
    use crate::table::test_util;

    struct MockContextProvider {}

    #[async_trait]
    impl ContextProvider for MockContextProvider {
        async fn procedure_state(
            &self,
            _procedure_id: ProcedureId,
        ) -> Result<Option<ProcedureState>> {
            Ok(Some(ProcedureState::Done))
        }
    }

    pub struct TestEnv {
        pub table_engine: MitoEngine<EngineImpl<NoopLogStore>>,
        pub dir: TempDir,
    }

    pub async fn setup_test_engine(path: &str) -> TestEnv {
        let (dir, object_store) = test_util::new_test_object_store(path).await;
        let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
        let storage_engine = EngineImpl::new(
            StorageEngineConfig::default(),
            Arc::new(NoopLogStore::default()),
            object_store.clone(),
            compaction_scheduler,
        );
        let table_engine = MitoEngine::new(EngineConfig::default(), storage_engine, object_store);

        TestEnv { table_engine, dir }
    }

    pub async fn execute_procedure_until_done(procedure: &mut BoxedProcedure) {
        let ctx = Context {
            procedure_id: ProcedureId::random(),
            provider: Arc::new(MockContextProvider {}),
        };

        loop {
            if let Status::Done = procedure.execute(&ctx).await.unwrap() {
                break;
            }
        }
    }
}
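Note: to see how `register_procedure_loaders` fits into crash recovery, here is a toy sketch of the loader dispatch. The manager keeps one loader per procedure type name; on restart it hands each persisted JSON dump (produced by `Procedure::dump`) back to the matching loader, which is how `CreateMitoTable::from_json` gets invoked. Every type below is a simplified stand-in, not the real `common_procedure` API, and the sketch assumes serde/serde_json with the derive feature.

use std::collections::HashMap;

use serde::{Deserialize, Serialize};

// Simplified stand-in for a procedure's persisted data.
#[derive(Debug, Serialize, Deserialize)]
struct CreateTableData {
    state: String,
    table_name: String,
}

type LoaderFn = Box<dyn Fn(&str) -> Result<CreateTableData, String>>;

#[derive(Default)]
struct ToyProcedureManager {
    loaders: HashMap<&'static str, LoaderFn>,
}

impl ToyProcedureManager {
    // Duplicate type names are a programming error, which is why
    // `register_procedure_loaders` above simply panics on failure.
    fn register_loader(&mut self, type_name: &'static str, loader: LoaderFn) -> Result<(), String> {
        if self.loaders.contains_key(type_name) {
            return Err(format!("loader for {type_name} already registered"));
        }
        self.loaders.insert(type_name, loader);
        Ok(())
    }

    // On restart the manager looks the loader up by type name and hands
    // it the JSON produced by `Procedure::dump`.
    fn recover(&self, type_name: &str, dumped: &str) -> Result<CreateTableData, String> {
        let loader = self
            .loaders
            .get(type_name)
            .ok_or_else(|| format!("no loader for {type_name}"))?;
        loader(dumped)
    }
}

fn main() {
    let mut manager = ToyProcedureManager::default();
    manager
        .register_loader(
            "mito::CreateMitoTable",
            Box::new(|json| serde_json::from_str::<CreateTableData>(json).map_err(|e| e.to_string())),
        )
        .unwrap();

    let dumped = r#"{"state":"CreateRegions","table_name":"metrics"}"#;
    let recovered = manager.recover("mito::CreateMitoTable", dumped).unwrap();
    assert_eq!(recovered.state, "CreateRegions");
}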
377
src/mito/src/engine/procedure/create.rs
Normal file
@@ -0,0 +1,377 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::sync::Arc;

use async_trait::async_trait;
use common_procedure::error::{FromJsonSnafu, ToJsonSnafu};
use common_procedure::{Context, Error, LockKey, Procedure, ProcedureManager, Result, Status};
use datatypes::schema::{Schema, SchemaRef};
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
use store_api::storage::{
    ColumnId, CreateOptions, EngineContext, OpenOptions, RegionDescriptorBuilder, RegionNumber,
    StorageEngine,
};
use table::metadata::{TableInfoBuilder, TableMetaBuilder, TableType};
use table::requests::CreateTableRequest;

use crate::engine::{self, MitoEngineInner, TableReference};
use crate::error::{
    BuildRegionDescriptorSnafu, BuildTableInfoSnafu, BuildTableMetaSnafu, InvalidRawSchemaSnafu,
    TableExistsSnafu,
};
use crate::table::MitoTable;

/// Procedure to create a [MitoTable].
pub(crate) struct CreateMitoTable<S: StorageEngine> {
    data: CreateTableData,
    engine_inner: Arc<MitoEngineInner<S>>,
    /// Created regions of the table.
    regions: HashMap<RegionNumber, S::Region>,
    /// Schema of the table.
    table_schema: Option<SchemaRef>,
}

#[async_trait]
impl<S: StorageEngine> Procedure for CreateMitoTable<S> {
    fn type_name(&self) -> &str {
        Self::TYPE_NAME
    }

    async fn execute(&mut self, _ctx: &Context) -> Result<Status> {
        match self.data.state {
            CreateTableState::Prepare => self.on_prepare(),
            CreateTableState::CreateRegions => self.on_create_regions().await,
            CreateTableState::WriteTableManifest => self.on_write_table_manifest().await,
        }
    }

    fn dump(&self) -> Result<String> {
        let json = serde_json::to_string(&self.data).context(ToJsonSnafu)?;
        Ok(json)
    }

    fn lock_key(&self) -> LockKey {
        let table_ref = self.data.table_ref();
        let keys = self
            .data
            .request
            .region_numbers
            .iter()
            .map(|number| format!("{table_ref}/region-{number}"));
        LockKey::new(keys)
    }
}

impl<S: StorageEngine> CreateMitoTable<S> {
    const TYPE_NAME: &str = "mito::CreateMitoTable";

    /// Returns a new [CreateMitoTable].
    pub(crate) fn new(request: CreateTableRequest, engine_inner: Arc<MitoEngineInner<S>>) -> Self {
        CreateMitoTable {
            data: CreateTableData {
                state: CreateTableState::Prepare,
                request,
                next_column_id: None,
            },
            engine_inner,
            regions: HashMap::new(),
            table_schema: None,
        }
    }

    /// Register the loader of this procedure to the `procedure_manager`.
    ///
    /// # Panics
    /// Panics on error.
    pub(crate) fn register_loader(
        engine_inner: Arc<MitoEngineInner<S>>,
        procedure_manager: &dyn ProcedureManager,
    ) {
        procedure_manager
            .register_loader(
                Self::TYPE_NAME,
                Box::new(move |data| {
                    Self::from_json(data, engine_inner.clone()).map(|p| Box::new(p) as _)
                }),
            )
            .unwrap()
    }

    /// Recover the procedure from json.
    fn from_json(json: &str, engine_inner: Arc<MitoEngineInner<S>>) -> Result<Self> {
        let data: CreateTableData = serde_json::from_str(json).context(FromJsonSnafu)?;

        Ok(CreateMitoTable {
            data,
            engine_inner,
            regions: HashMap::new(),
            table_schema: None,
        })
    }

    /// Checks whether the table exists.
    fn on_prepare(&mut self) -> Result<Status> {
        let table_ref = self.data.table_ref();
        if self.engine_inner.get_table(&table_ref).is_some() {
            // The table already exists.
            ensure!(
                self.data.request.create_if_not_exists,
                TableExistsSnafu {
                    table_name: table_ref.to_string(),
                }
            );

            return Ok(Status::Done);
        }

        self.data.state = CreateTableState::CreateRegions;

        Ok(Status::executing(true))
    }

    /// Creates regions for the table.
    async fn on_create_regions(&mut self) -> Result<Status> {
        let engine_ctx = EngineContext::default();
        let table_dir = engine::table_dir(
            &self.data.request.catalog_name,
            &self.data.request.schema_name,
            self.data.request.id,
        );
        let open_opts = OpenOptions {
            parent_dir: table_dir.clone(),
            write_buffer_size: None,
            ttl: None,
        };
        let create_opts = CreateOptions {
            parent_dir: table_dir,
            write_buffer_size: None,
            ttl: None,
        };

        let table_schema =
            Schema::try_from(self.data.request.schema.clone()).context(InvalidRawSchemaSnafu)?;
        let primary_key_indices = &self.data.request.primary_key_indices;
        let (next_column_id, default_cf) = engine::build_column_family(
            engine::INIT_COLUMN_ID,
            &self.data.request.table_name,
            &table_schema,
            primary_key_indices,
        )?;
        let (next_column_id, row_key) = engine::build_row_key_desc(
            next_column_id,
            &self.data.request.table_name,
            &table_schema,
            primary_key_indices,
        )?;
        self.data.next_column_id = Some(next_column_id);

        // Try to open all regions, and collect the regions that do not exist.
        for number in &self.data.request.region_numbers {
            if self.regions.contains_key(number) {
                // Region is opened.
                continue;
            }

            let region_name = engine::region_name(self.data.request.id, *number);
            if let Some(region) = self
                .engine_inner
                .storage_engine
                .open_region(&engine_ctx, &region_name, &open_opts)
                .await
                .map_err(Error::from_error_ext)?
            {
                // Region already exists.
                self.regions.insert(*number, region);
                continue;
            }

            // We need to create that region.
            let region_id = engine::region_id(self.data.request.id, *number);
            let region_desc = RegionDescriptorBuilder::default()
                .id(region_id)
                .name(region_name.clone())
                .row_key(row_key.clone())
                .default_cf(default_cf.clone())
                .build()
                .context(BuildRegionDescriptorSnafu {
                    table_name: &self.data.request.table_name,
                    region_name,
                })?;

            let region = self
                .engine_inner
                .storage_engine
                .create_region(&engine_ctx, region_desc, &create_opts)
                .await
                .map_err(Error::from_error_ext)?;

            self.regions.insert(*number, region);
        }

        // All regions are created, move to the next step.
        self.data.state = CreateTableState::WriteTableManifest;
        self.table_schema = Some(Arc::new(table_schema));

        Ok(Status::executing(true))
    }

    /// Writes metadata to the table manifest.
    async fn on_write_table_manifest(&mut self) -> Result<Status> {
        let table_dir = engine::table_dir(
            &self.data.request.catalog_name,
            &self.data.request.schema_name,
            self.data.request.id,
        );
        // Try to open the table first, as the table manifest might already exist.
        let table_ref = self.data.table_ref();
        if let Some((manifest, table_info)) = self
            .engine_inner
            .recover_table_manifest_and_info(&self.data.request.table_name, &table_dir)
            .await?
        {
            let table = Arc::new(MitoTable::new(table_info, self.regions.clone(), manifest));

            self.engine_inner
                .tables
                .write()
                .unwrap()
                .insert(table_ref.to_string(), table);
            return Ok(Status::Done);
        }

        // We need to persist the table manifest and create the table.
        let table = self.write_manifest_and_create_table(&table_dir).await?;
        let table = Arc::new(table);
        self.engine_inner
            .tables
            .write()
            .unwrap()
            .insert(table_ref.to_string(), table);

        Ok(Status::Done)
    }

    /// Writes metadata to the table manifest and returns the created table.
    async fn write_manifest_and_create_table(
        &self,
        table_dir: &str,
    ) -> Result<MitoTable<S::Region>> {
        // Safety: We are in `WriteTableManifest` state.
        let next_column_id = self.data.next_column_id.unwrap();
        let table_schema = self.table_schema.clone().unwrap();

        let table_meta = TableMetaBuilder::default()
            .schema(table_schema)
            .engine(engine::MITO_ENGINE)
            .next_column_id(next_column_id)
            .primary_key_indices(self.data.request.primary_key_indices.clone())
            .region_numbers(self.data.request.region_numbers.clone())
            .build()
            .context(BuildTableMetaSnafu {
                table_name: &self.data.request.table_name,
            })?;

        let table_info = TableInfoBuilder::new(self.data.request.table_name.clone(), table_meta)
            .ident(self.data.request.id)
            .table_version(engine::INIT_TABLE_VERSION)
            .table_type(TableType::Base)
            .catalog_name(&self.data.request.catalog_name)
            .schema_name(&self.data.request.schema_name)
            .desc(self.data.request.desc.clone())
            .build()
            .context(BuildTableInfoSnafu {
                table_name: &self.data.request.table_name,
            })?;

        let table = MitoTable::create(
            &self.data.request.table_name,
            table_dir,
            table_info,
            self.regions.clone(),
            self.engine_inner.object_store.clone(),
        )
        .await?;

        Ok(table)
    }
}

/// Represents each step while creating a table in the mito engine.
#[derive(Debug, Serialize, Deserialize)]
enum CreateTableState {
    /// Prepare to create regions.
    Prepare,
    /// Create regions.
    CreateRegions,
    /// Write metadata to table manifest.
    WriteTableManifest,
}

/// Serializable data of [CreateMitoTable].
#[derive(Debug, Serialize, Deserialize)]
struct CreateTableData {
    state: CreateTableState,
    request: CreateTableRequest,
    /// Next column id.
    ///
    /// Available in [CreateTableState::WriteTableManifest] state.
    next_column_id: Option<ColumnId>,
}

impl CreateTableData {
    fn table_ref(&self) -> TableReference {
        TableReference {
            catalog: &self.request.catalog_name,
            schema: &self.request.schema_name,
            table: &self.request.table_name,
        }
    }
}

#[cfg(test)]
mod tests {
    use table::engine::{EngineContext, TableEngine, TableEngineProcedure};

    use super::*;
    use crate::engine::procedure::procedure_test_util::{self, TestEnv};
    use crate::table::test_util;

    #[tokio::test]
    async fn test_create_table_procedure() {
        let TestEnv {
            table_engine,
            dir: _dir,
        } = procedure_test_util::setup_test_engine("create_procedure").await;
        let schema = Arc::new(test_util::schema_for_test());
        let request = test_util::new_create_request(schema);

        let mut procedure = table_engine
            .create_table_procedure(&EngineContext::default(), request.clone())
            .unwrap();
        procedure_test_util::execute_procedure_until_done(&mut procedure).await;

        let table_ref = TableReference {
            catalog: &request.catalog_name,
            schema: &request.schema_name,
            table: &request.table_name,
        };
        assert!(table_engine
            .get_table(&EngineContext::default(), &table_ref)
            .unwrap()
            .is_some());
    }
}
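Note: the open-or-create loop in `on_create_regions` above is what makes this procedure safe to re-execute. If the process crashed after creating only some regions, a retry opens the survivors instead of failing. A toy sketch of that pattern follows, with an in-memory map standing in for the storage engine; the region-naming scheme and id packing are assumptions for illustration, not the real `engine::region_name`/`engine::region_id`.

use std::collections::HashMap;

// Toy stand-in for the storage engine: regions indexed by name.
#[derive(Default)]
struct ToyEngine {
    regions: HashMap<String, u64>, // region name -> region id
}

impl ToyEngine {
    fn open_region(&self, name: &str) -> Option<u64> {
        self.regions.get(name).copied()
    }

    fn create_region(&mut self, name: &str, id: u64) -> u64 {
        self.regions.insert(name.to_string(), id);
        id
    }
}

// Mirrors the loop in `on_create_regions`: try to open first and only
// create a region that does not exist, so re-running after a crash is safe.
fn open_or_create(engine: &mut ToyEngine, table_id: u64, numbers: &[u32]) -> HashMap<u32, u64> {
    let mut regions = HashMap::new();
    for number in numbers {
        // Assumed naming and id scheme, for illustration only.
        let name = format!("{table_id}_{number:010}");
        let id = match engine.open_region(&name) {
            Some(existing) => existing, // region survived an earlier attempt
            None => engine.create_region(&name, (table_id << 32) | u64::from(*number)),
        };
        regions.insert(*number, id);
    }
    regions
}

fn main() {
    let mut engine = ToyEngine::default();
    let first = open_or_create(&mut engine, 42, &[0, 1]);
    // A second run, simulating procedure re-execution, changes nothing.
    let second = open_or_create(&mut engine, 42, &[0, 1]);
    assert_eq!(first, second);
}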
@@ -230,6 +230,12 @@ impl ErrorExt for Error {
    }
}

impl From<Error> for common_procedure::Error {
    fn from(e: Error) -> common_procedure::Error {
        common_procedure::Error::from_error_ext(e)
    }
}

#[cfg(test)]
mod tests {
    use common_error::ext::BoxedError;
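Note: this `From<Error>` impl is what lets the procedure code in create.rs propagate mito-level failures with `?` from functions returning `common_procedure::Result`. A minimal sketch of the same conversion pattern, using stand-in error types:

// Stand-ins for `crate::error::Error` and `common_procedure::Error`;
// only the conversion pattern is the point here.
#[derive(Debug)]
struct EngineError(String);

#[derive(Debug)]
struct ProcedureError(String);

impl From<EngineError> for ProcedureError {
    fn from(e: EngineError) -> ProcedureError {
        ProcedureError(format!("engine error: {}", e.0))
    }
}

fn engine_step() -> Result<(), EngineError> {
    Err(EngineError("region not found".to_string()))
}

// Thanks to the `From` impl, `?` converts the error automatically,
// mirroring how `execute` propagates storage failures in create.rs.
fn procedure_step() -> Result<(), ProcedureError> {
    engine_step()?;
    Ok(())
}

fn main() {
    assert!(procedure_step().is_err());
}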
Some files were not shown because too many files have changed in this diff.