mirror of
https://github.com/GreptimeTeam/greptimedb.git
synced 2026-01-04 04:12:55 +00:00
Compare commits
38 Commits
v0.3.0-nig
...
v0.3.0-nig
| SHA1 |
|---|
| c3eeda7d84 |
| 82f2b34f4d |
| 8764ce7845 |
| d76ddc575f |
| 68dfea0cfd |
| 57c02af55b |
| e8c2222a76 |
| eb95a9e78b |
| 4920836021 |
| 715e1a321f |
| a6ec79ee30 |
| e70d49b9cf |
| ca75a7b744 |
| 3330957896 |
| fb1ac0cb9c |
| 856ab5bea7 |
| 122bd5f0ab |
| 2fd1075c4f |
| 027707d969 |
| 8d54d40b21 |
| 497b1f9dc9 |
| 4ae0b5e185 |
| cfcfc72681 |
| 66903d42e1 |
| 4fc173acf0 |
| f9a4326461 |
| 4151d7a8ea |
| a4e106380b |
| 7a310cb056 |
| 8fef32f8ef |
| 8c85fdec29 |
| 84f6b46437 |
| 44aef6fcbd |
| 7a9dd5f0c8 |
| 486bb2ee8e |
| 020c55e260 |
| ee3e1dbdaa |
| aa0c5b888c |
14
.github/workflows/release.yml
vendored
@@ -84,13 +84,14 @@ jobs:
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
targets: ${{ matrix.arch }}

- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Output package versions
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version

# - name: Run tests
#   if: env.DISABLE_RUN_TESTS == 'false'
#   run: make unit-test integration-test sqlness-test
- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
run: make test sqlness-test

- name: Run cargo build
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
@@ -200,13 +201,14 @@ jobs:
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
targets: ${{ matrix.arch }}

- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Output package versions
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version

- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
run: make unit-test integration-test sqlness-test
run: make test sqlness-test

- name: Run cargo build
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
138
Cargo.lock
generated
@@ -53,6 +53,15 @@ dependencies = [
|
||||
"version_check",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aho-corasick"
|
||||
version = "0.7.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aho-corasick"
|
||||
version = "1.0.1"
|
||||
@@ -1518,6 +1527,7 @@ dependencies = [
|
||||
"common-recordbatch",
|
||||
"common-telemetry",
|
||||
"common-test-util",
|
||||
"config",
|
||||
"datanode",
|
||||
"either",
|
||||
"frontend",
|
||||
@@ -1534,6 +1544,7 @@ dependencies = [
|
||||
"session",
|
||||
"snafu",
|
||||
"substrait 0.2.0",
|
||||
"temp-env",
|
||||
"tikv-jemalloc-ctl",
|
||||
"tikv-jemallocator",
|
||||
"tokio",
|
||||
@@ -1621,6 +1632,7 @@ dependencies = [
|
||||
"derive_builder 0.12.0",
|
||||
"futures",
|
||||
"object-store",
|
||||
"paste",
|
||||
"regex",
|
||||
"snafu",
|
||||
"tokio",
|
||||
@@ -1664,7 +1676,9 @@ name = "common-function-macro"
|
||||
version = "0.2.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"backtrace",
|
||||
"common-query",
|
||||
"common-telemetry",
|
||||
"datatypes",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -1680,11 +1694,14 @@ dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
"async-trait",
|
||||
"backtrace",
|
||||
"common-base",
|
||||
"common-error",
|
||||
"common-function-macro",
|
||||
"common-query",
|
||||
"common-recordbatch",
|
||||
"common-runtime",
|
||||
"common-telemetry",
|
||||
"criterion 0.4.0",
|
||||
"dashmap",
|
||||
"datafusion",
|
||||
@@ -1730,6 +1747,16 @@ dependencies = [
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "common-meta"
|
||||
version = "0.2.0"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"snafu",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure"
|
||||
version = "0.2.0"
|
||||
@@ -1819,11 +1846,13 @@ dependencies = [
|
||||
"console-subscriber",
|
||||
"metrics",
|
||||
"metrics-exporter-prometheus",
|
||||
"metrics-util",
|
||||
"once_cell",
|
||||
"opentelemetry",
|
||||
"opentelemetry-jaeger",
|
||||
"parking_lot",
|
||||
"serde",
|
||||
"tokio",
|
||||
"tracing",
|
||||
"tracing-appender",
|
||||
"tracing-bunyan-formatter",
|
||||
@@ -1861,6 +1890,25 @@ dependencies = [
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "config"
|
||||
version = "0.13.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"json5",
|
||||
"lazy_static",
|
||||
"nom",
|
||||
"pathdiff",
|
||||
"ron",
|
||||
"rust-ini",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"toml",
|
||||
"yaml-rust",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "console"
|
||||
version = "0.15.5"
|
||||
@@ -2462,6 +2510,7 @@ dependencies = [
|
||||
"common-function",
|
||||
"common-grpc",
|
||||
"common-grpc-expr",
|
||||
"common-meta",
|
||||
"common-procedure",
|
||||
"common-query",
|
||||
"common-recordbatch",
|
||||
@@ -2478,6 +2527,7 @@ dependencies = [
|
||||
"futures-util",
|
||||
"humantime-serde",
|
||||
"hyper",
|
||||
"key-lock",
|
||||
"log",
|
||||
"log-store",
|
||||
"meta-client",
|
||||
@@ -3041,8 +3091,6 @@ dependencies = [
|
||||
"prost",
|
||||
"query",
|
||||
"regex",
|
||||
"rstest",
|
||||
"rstest_reuse",
|
||||
"script",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -3822,7 +3870,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
|
||||
[[package]]
|
||||
name = "greptime-proto"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=e8abf8241c908448dce595399e89c89a40d048bd#e8abf8241c908448dce595399e89c89a40d048bd"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=6bfb02057c40da0e397c0cb4f6b87bd769669d50#6bfb02057c40da0e397c0cb4f6b87bd769669d50"
|
||||
dependencies = [
|
||||
"prost",
|
||||
"tonic 0.9.2",
|
||||
@@ -4269,6 +4317,17 @@ dependencies = [
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "json5"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1"
|
||||
dependencies = [
|
||||
"pest",
|
||||
"pest_derive",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jsonwebtoken"
|
||||
version = "8.3.0"
|
||||
@@ -4474,6 +4533,12 @@ dependencies = [
|
||||
"cc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "linked-hash-map"
|
||||
version = "0.5.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
|
||||
|
||||
[[package]]
|
||||
name = "linux-raw-sys"
|
||||
version = "0.3.4"
|
||||
@@ -4791,6 +4856,7 @@ dependencies = [
|
||||
"common-catalog",
|
||||
"common-error",
|
||||
"common-grpc",
|
||||
"common-meta",
|
||||
"common-procedure",
|
||||
"common-runtime",
|
||||
"common-telemetry",
|
||||
@@ -4803,6 +4869,7 @@ dependencies = [
|
||||
"http-body",
|
||||
"lazy_static",
|
||||
"metrics",
|
||||
"once_cell",
|
||||
"parking_lot",
|
||||
"prost",
|
||||
"rand",
|
||||
@@ -4852,8 +4919,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "metrics-exporter-prometheus"
|
||||
version = "0.11.1"
|
||||
source = "git+https://github.com/GreptimeTeam/metrics.git?rev=174de287e9f7f9f57c0272be56c95df156489476#174de287e9f7f9f57c0272be56c95df156489476"
|
||||
version = "0.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8603921e1f54ef386189335f288441af761e0fc61bcb552168d9cedfe63ebc70"
|
||||
dependencies = [
|
||||
"indexmap",
|
||||
"metrics",
|
||||
@@ -4881,14 +4949,18 @@ version = "0.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f7d24dc2dbae22bff6f1f9326ffce828c9f07ef9cc1e8002e5279f845432a30a"
|
||||
dependencies = [
|
||||
"aho-corasick 0.7.20",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
"hashbrown 0.12.3",
|
||||
"indexmap",
|
||||
"metrics",
|
||||
"num_cpus",
|
||||
"ordered-float 2.10.0",
|
||||
"parking_lot",
|
||||
"portable-atomic",
|
||||
"quanta",
|
||||
"radix_trie",
|
||||
"sketches-ddsketch",
|
||||
]
|
||||
|
||||
@@ -4945,6 +5017,7 @@ dependencies = [
|
||||
"async-trait",
|
||||
"chrono",
|
||||
"common-catalog",
|
||||
"common-datasource",
|
||||
"common-error",
|
||||
"common-procedure",
|
||||
"common-procedure-test",
|
||||
@@ -5730,6 +5803,12 @@ version = "1.0.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79"
|
||||
|
||||
[[package]]
|
||||
name = "pathdiff"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd"
|
||||
|
||||
[[package]]
|
||||
name = "peeking_take_while"
|
||||
version = "0.1.2"
|
||||
@@ -6687,7 +6766,7 @@ version = "1.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370"
|
||||
dependencies = [
|
||||
"aho-corasick",
|
||||
"aho-corasick 1.0.1",
|
||||
"memchr",
|
||||
"regex-syntax 0.7.1",
|
||||
]
|
||||
@@ -8323,6 +8402,7 @@ dependencies = [
|
||||
"futures-util",
|
||||
"lazy_static",
|
||||
"log-store",
|
||||
"metrics",
|
||||
"object-store",
|
||||
"parquet",
|
||||
"paste",
|
||||
@@ -8639,6 +8719,15 @@ version = "0.12.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ae9980cab1db3fceee2f6c6f643d5d8de2997c58ee8d25fb0cc8a9e9e7348e5"
|
||||
|
||||
[[package]]
|
||||
name = "temp-env"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9547444bfe52cbd79515c6c8087d8ae6ca8d64d2d31a27746320f5cb81d1a15c"
|
||||
dependencies = [
|
||||
"parking_lot",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tempfile"
|
||||
version = "3.5.0"
|
||||
@@ -8690,31 +8779,49 @@ dependencies = [
|
||||
"axum-test-helper",
|
||||
"catalog",
|
||||
"client",
|
||||
"common-base",
|
||||
"common-catalog",
|
||||
"common-error",
|
||||
"common-grpc",
|
||||
"common-query",
|
||||
"common-recordbatch",
|
||||
"common-runtime",
|
||||
"common-telemetry",
|
||||
"common-test-util",
|
||||
"datafusion",
|
||||
"datafusion-expr",
|
||||
"datanode",
|
||||
"datatypes",
|
||||
"dotenv",
|
||||
"frontend",
|
||||
"futures",
|
||||
"itertools",
|
||||
"meta-client",
|
||||
"meta-srv",
|
||||
"mito",
|
||||
"object-store",
|
||||
"once_cell",
|
||||
"partition",
|
||||
"paste",
|
||||
"prost",
|
||||
"query",
|
||||
"rand",
|
||||
"rstest",
|
||||
"rstest_reuse",
|
||||
"script",
|
||||
"secrecy",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"servers",
|
||||
"session",
|
||||
"snafu",
|
||||
"sql",
|
||||
"store-api",
|
||||
"table",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
"tonic 0.9.2",
|
||||
"tower",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
@@ -8926,9 +9033,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.27.0"
|
||||
version = "1.28.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001"
|
||||
checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"bytes",
|
||||
@@ -8941,7 +9048,7 @@ dependencies = [
|
||||
"socket2 0.4.9",
|
||||
"tokio-macros",
|
||||
"tracing",
|
||||
"windows-sys 0.45.0",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -8956,9 +9063,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tokio-macros"
|
||||
version = "2.0.0"
|
||||
version = "2.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce"
|
||||
checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -10261,6 +10368,15 @@ dependencies = [
|
||||
"lzma-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "yaml-rust"
|
||||
version = "0.4.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85"
|
||||
dependencies = [
|
||||
"linked-hash-map",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zeroize"
|
||||
version = "1.6.0"
|
||||
@@ -14,6 +14,7 @@ members = [
"src/common/grpc",
"src/common/grpc-expr",
"src/common/mem-prof",
"src/common/meta",
"src/common/procedure",
"src/common/procedure-test",
"src/common/query",
@@ -79,7 +80,7 @@ serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.33"
tempfile = "3"
tokio = { version = "1.24.2", features = ["full"] }
tokio = { version = "1.28", features = ["full"] }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
tonic = { version = "0.9", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
11
Makefile
@@ -33,13 +33,12 @@ docker-image: ## Build docker image.

##@ Test

.PHONY: unit-test
unit-test: ## Run unit test.
cargo test --workspace
test: nextest ## Run unit and integration tests.
cargo nextest run

.PHONY: integration-test
integration-test: ## Run integation test.
cargo test integration
.PHONY: nextest ## Install nextest tools.
nextest:
cargo --list | grep nextest || cargo install cargo-nextest --locked

.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.
58
README.md
@@ -100,64 +100,22 @@ Or if you built from docker:
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
```

For more startup options, greptimedb's **distributed mode** and information
about Kubernetes deployment, check our [docs](https://docs.greptime.com/).
Please see [the online document site](https://docs.greptime.com/getting-started/overview#install-greptimedb) for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).

### Connect
### Get started

1. Connect to GreptimeDB via standard [MySQL
client](https://dev.mysql.com/downloads/mysql/):
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).

```
# The standalone instance listen on port 4002 by default.
mysql -h 127.0.0.1 -P 4002
```

2. Create table:

```SQL
CREATE TABLE monitor (
    host STRING,
    ts TIMESTAMP,
    cpu DOUBLE DEFAULT 0,
    memory DOUBLE,
    TIME INDEX (ts),
    PRIMARY KEY(host)) ENGINE=mito WITH(regions=1);
```

3. Insert some data:

```SQL
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host1', 66.6, 1024, 1660897955000);
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host2', 77.7, 2048, 1660897956000);
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host3', 88.8, 4096, 1660897957000);
```

4. Query the data:

```SQL
SELECT * FROM monitor;
```

```TEXT
+-------+--------------------------+------+--------+
| host  | ts                       | cpu  | memory |
+-------+--------------------------+------+--------+
| host1 | 2022-08-19 16:32:35+0800 | 66.6 | 1024   |
| host2 | 2022-08-19 16:32:36+0800 | 77.7 | 2048   |
| host3 | 2022-08-19 16:32:37+0800 | 88.8 | 4096   |
+-------+--------------------------+------+--------+
3 rows in set (0.03 sec)
```

You can always cleanup test database by removing `/tmp/greptimedb`.
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients).

## Resources

### Installation

- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
For Linux and macOS, you can easily download pre-built binaries that are ready to use. In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version. We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Pre-built Binaries](https://greptime.com/download):
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version.
We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
Docker images, this is the easiest way to try GreptimeDB. By default it runs CPython script with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
@@ -52,6 +52,15 @@ gc_duration = '30s'
# Whether to try creating a manifest checkpoint on region opening
checkpoint_on_startup = false

# Storage flush options
[storage.flush]
# Max inflight flush tasks.
max_flush_tasks = 8
# Default write buffer size for a region.
region_write_buffer_size = "32MB"
# Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"

# Procedure storage options, see `standalone.example.toml`.
[procedure]
max_retry_times = 3
@@ -117,6 +117,15 @@ gc_duration = '30s'
# Whether to try creating a manifest checkpoint on region opening
checkpoint_on_startup = false

# Storage flush options
[storage.flush]
# Max inflight flush tasks.
max_flush_tasks = 8
# Default write buffer size for a region.
region_write_buffer_size = "32MB"
# Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"

# Procedure storage options.
[procedure]
# Procedure max retry time.
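
For illustration, here is a minimal sketch of how a `[storage.flush]` section like the one above could be deserialized in Rust. The struct name, field types, and the use of the `humantime-serde` and `toml` crates are assumptions made for this sketch, not the actual GreptimeDB definitions:

```rust
use std::time::Duration;

use serde::Deserialize;

// Field names mirror the TOML keys above; types are assumed for illustration.
#[derive(Debug, Deserialize)]
#[serde(default)]
pub struct FlushConfig {
    /// Max inflight flush tasks.
    pub max_flush_tasks: usize,
    /// Default write buffer size for a region, e.g. "32MB".
    pub region_write_buffer_size: String,
    /// Interval to auto flush a region if it has not flushed yet, e.g. "1h".
    #[serde(with = "humantime_serde")]
    pub auto_flush_interval: Duration,
}

impl Default for FlushConfig {
    fn default() -> Self {
        Self {
            max_flush_tasks: 8,
            region_write_buffer_size: "32MB".to_string(),
            auto_flush_interval: Duration::from_secs(60 * 60),
        }
    }
}

fn main() {
    let toml_str = r#"
        max_flush_tasks = 8
        region_write_buffer_size = "32MB"
        auto_flush_interval = "1h"
    "#;
    // Parse the snippet and check that the duration string round-trips.
    let cfg: FlushConfig = toml::from_str(toml_str).expect("valid flush config");
    assert_eq!(cfg.max_flush_tasks, 8);
    assert_eq!(cfg.auto_flush_interval, Duration::from_secs(3600));
}
```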
137
docs/rfcs/2023-05-09-distributed-planner.md
Normal file
@@ -0,0 +1,137 @@
---
Feature Name: distributed-planner
Tracking Issue: TBD
Date: 2023-05-09
Author: "Ruihang Xia <waynestxia@gmail.com>"
---

Distributed Planner
-------------------
# Summary
Enhance the logical planner to be aware of the distributed, multi-region table topology, in order to achieve "push computation down" execution rather than the current "pull data up" manner.

# Motivation
Querying distributively can leverage the advantages of GreptimeDB's architecture to process large datasets that exceed the capacity of a single node, or to accelerate query execution by running it in parallel. This task includes two sub-tasks:
- Be able to transform the plan so that as much computation as possible is pushed down to the data source.
- Be able to handle pipeline breakers (like `Join` or `Sort`) on multiple computation nodes.
This is a relatively complex topic. To keep this RFC focused, I'll concentrate on the first one.

# Details
## Background: Partition and Region
GreptimeDB supports table partitioning, where the partition rule is set during table creation. Each partition can be further divided into one or more physical storage units known as "regions". Both partitions and regions are divided based on rows:
``` text
┌────────────────────────────────────┐
│                                    │
│               Table                │
│                                    │
└─────┬────────────┬────────────┬────┘
      │            │            │
      │            │            │
┌─────▼────┐ ┌─────▼────┐ ┌─────▼────┐
│ Region 1 │ │ Region 2 │ │ Region 3 │
└──────────┘ └──────────┘ └──────────┘
  Row 1~10    Row 11~20    Row 21~30
```
Generally speaking, a region is the minimum element of data distribution, and we can also use it as the unit for distributing computation. This greatly simplifies the routing logic of the distributed planner: always schedule the computation to the node that currently opens the corresponding region. It is also easy to scale out more nodes for computing, since GreptimeDB's data is persisted on a shared storage backend like S3. But this is a bit beyond the scope of this specific topic.
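
As a toy illustration of the row-based split in the figure above, the sketch below routes a row id to its region. The fixed ranges and the `region_for_row` helper are made up for this example; real partition rules are declared at `CREATE TABLE` time and are far more general:

``` rust
// Toy routing that mirrors the figure: rows 1~10 -> region 1,
// rows 11~20 -> region 2, everything else -> region 3.
fn region_for_row(row_id: u32) -> u32 {
    match row_id {
        1..=10 => 1,
        11..=20 => 2,
        _ => 3,
    }
}

fn main() {
    assert_eq!(region_for_row(5), 1);
    assert_eq!(region_for_row(15), 2);
    assert_eq!(region_for_row(25), 3);
}
```
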
## Background: Commutativity
Commutativity is an attribute that describes whether two operations can exchange their application order: $P1(P2(R)) \Leftrightarrow P2(P1(R))$. If the equation holds, we can transform one expression into another form without changing its result. This is useful for rewriting SQL expressions, and is the theoretical basis of this RFC.

Take this SQL as an example

``` sql
SELECT a FROM t WHERE a > 10;
```

As we know, projection and filter are commutative (todo: latex), so this query can be translated into the following two identical plan trees:

```text
┌─────────────┐          ┌─────────────┐
│Projection(a)│          │Filter(a>10) │
└──────▲──────┘          └──────▲──────┘
       │                        │
┌──────┴──────┐          ┌──────┴──────┐
│Filter(a>10) │          │Projection(a)│
└──────▲──────┘          └──────▲──────┘
       │                        │
┌──────┴──────┐          ┌──────┴──────┐
│  TableScan  │          │  TableScan  │
└─────────────┘          └─────────────┘
```
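
Stated in relational-algebra form, the condition can be sketched as follows (assuming the filter predicate only references columns kept by the projection):

$$\pi_A\bigl(\sigma_p(R)\bigr) = \sigma_p\bigl(\pi_A(R)\bigr) \quad \text{when } attrs(p) \subseteq A, \qquad \text{e.g. } \pi_a\bigl(\sigma_{a>10}(t)\bigr) = \sigma_{a>10}\bigl(\pi_a(t)\bigr)$$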

## Merge Operation

This RFC proposes adding a new expression node, `MergeScan`, to merge results from several regions in the frontend. It wraps the abstraction of remote data and execution, and exposes a `TableScan` interface to the upper level.

``` text
        ▲
        │
┌───────┼───────┐
│       │       │
│    ┌──┴──┐    │
│    └──▲──┘    │
│       │       │
│    ┌──┴──┐    │
│    └──▲──┘    │    ┌────────────────────────────────┐
│       │       │    │                                │
│  ┌────┴────┐  │    │  ┌──────────┐  ┌───┐    ┌───┐  │
│  │MergeScan◄──┼────┤  │ Region 1 │  │   │ .. │   │  │
│  └─────────┘  │    │  └──────────┘  └───┘    └───┘  │
│               │    │                                │
└─Frontend──────┘    └─Remote-Sources─────────────────┘
```
This merge operation simply chains all the underlying remote data sources and returns `RecordBatch`es, just like a coalesce op. Each remote source is a gRPC query to a datanode via the substrait logical plan interface. The plan is transformed and divided from the original query that comes to the frontend.
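
As a rough sketch of that chaining behaviour, the snippet below merges per-region results in arrival order. `RecordBatch`, `RegionSource`, and `MergeScan` here are simplified stand-ins rather than the real GreptimeDB or DataFusion types, and a real implementation would stream Arrow batches over gRPC instead of holding them in memory:

``` rust
// Placeholder for an Arrow record batch: (host, cpu) pairs.
struct RecordBatch(Vec<(String, f64)>);

/// One remote source: conceptually a gRPC/substrait query answered by the
/// datanode that serves a single region.
struct RegionSource {
    region_id: u32,
    batches: Vec<RecordBatch>, // pretend these came back over gRPC
}

/// The proposed `MergeScan` node: it exposes the merged output of all
/// region sources as if it were a single `TableScan`.
struct MergeScan {
    sources: Vec<RegionSource>,
}

impl MergeScan {
    /// Chain every region's batches one after another, like a coalesce op.
    fn execute(self) -> impl Iterator<Item = RecordBatch> {
        self.sources
            .into_iter()
            .flat_map(|source| source.batches.into_iter())
    }
}

fn main() {
    let scan = MergeScan {
        sources: vec![
            RegionSource { region_id: 1, batches: vec![RecordBatch(vec![("host1".into(), 66.6)])] },
            RegionSource { region_id: 2, batches: vec![RecordBatch(vec![("host2".into(), 77.7)])] },
        ],
    };
    assert_eq!(scan.sources[0].region_id, 1);
    // The parent plan node consumes the merged stream without knowing
    // how many regions were involved.
    let merged: Vec<RecordBatch> = scan.execute().collect();
    assert_eq!(merged.len(), 2);
    assert_eq!(merged[0].0[0].0, "host1");
}
```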

## Commutativity of MergeScan

Obviously, the position of `MergeScan` is the key to the distributed plan. The closer it sits to the underlying `TableScan`, the less computation is taken by the datanodes. Thus the goal is to pull the `MergeScan` up as far as possible. "Pull up" means exchanging `MergeScan` with its parent node in the plan tree, so we should check the commutativity between the existing expression nodes and the `MergeScan`. Here I classify all the possibilities into five categories (a worked `count` example follows the list):

- Commutative: $P1(P2(R)) \Leftrightarrow P2(P1(R))$
  - filter
  - projection
  - operations that match the partition key
- Partial Commutative: $P1(P2(R)) \Leftrightarrow P1(P2(P1(R)))$
  - $min(R) \rightarrow min(MERGE(min(R)))$
  - $max(R) \rightarrow max(MERGE(max(R)))$
- Conditional Commutative: $P1(P2(R)) \Leftrightarrow P3(P2(P1(R)))$
  - $count(R) \rightarrow sum(count(R))$
- Transformed Commutative: $P1(P2(R)) \Leftrightarrow P1(P3(R)) \Leftrightarrow P3(P1(R))$
  - $avg(R) \rightarrow sum(R)/count(R)$
- Non-commutative
  - sort
  - join
  - percentile
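
For the conditionally commutative `count` case above, the rewrite holds because the regions partition the table into disjoint row sets, so each datanode can return its local count and the frontend applies a different operator ($P3 = sum$) to the partial results:

$$count(R) = count\Bigl(\bigcup_{i=1}^{n} R_i\Bigr) = \sum_{i=1}^{n} count(R_i), \qquad R_i \cap R_j = \varnothing \ \text{for}\ i \neq j$$
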
## Steps to plan
After establishing the set of commutative relations for all expressions, we can begin transforming the logical plan. There are four steps (a condensed code sketch follows at the end of this section):

- Add a merge node before table scan
- Evaluate commutativity in a bottom-up way, stop at the first non-commutative node
- Divide the TableScan to scan over partitions
- Execute

First, insert the `MergeScan` on top of the bottom `TableScan` node. Then examine commutativity starting from the `MergeScan` node and transform the plan tree based on the result. Stop this process at the first non-commutative node.
``` text
                         ┌─────────────┐          ┌─────────────┐
                         │    Sort     │          │    Sort     │
                         └──────▲──────┘          └──────▲──────┘
                                │                        │
┌─────────────┐          ┌──────┴──────┐          ┌──────┴──────┐
│    Sort     │          │Projection(a)│          │  MergeScan  │
└──────▲──────┘          └──────▲──────┘          └──────▲──────┘
       │                        │                        │
┌──────┴──────┐          ┌──────┴──────┐          ┌──────┴──────┐
│Projection(a)│          │  MergeScan  │          │Projection(a)│
└──────▲──────┘          └──────▲──────┘          └──────▲──────┘
       │                        │                        │
┌──────┴──────┐          ┌──────┴──────┐          ┌──────┴──────┐
│  TableScan  │          │  TableScan  │          │  TableScan  │
└─────────────┘          └─────────────┘          └─────────────┘
      (a)                      (b)                      (c)
```
Then, in the physical planning phase, convert the sub-tree below `MergeScan` into a remote query request and dispatch it to all the regions. The `MergeScan` receives the results and feeds them to its parent node.

To reduce overall complexity, any error in the procedure fails the entire query and cancels all other parts.
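
A condensed sketch of this rewrite on a linearised plan tree is shown below. `PlanNode`, `Commutativity`, and the rule table are simplified placeholders for illustration, not the actual planner types:

``` rust
#[derive(Debug, PartialEq)]
enum PlanNode {
    TableScan,
    MergeScan,
    Filter,
    Projection,
    Sort,
}

enum Commutativity {
    Commutative,
    NonCommutative,
}

/// Decide whether `MergeScan` may be swapped with `node` (step 2 above).
fn commutativity_with_merge_scan(node: &PlanNode) -> Commutativity {
    match node {
        // Filter and projection commute with MergeScan, so they can be
        // pushed below it and executed on the datanodes.
        PlanNode::Filter | PlanNode::Projection => Commutativity::Commutative,
        // Sort, join, etc. are pipeline breakers: stop pulling up here.
        _ => Commutativity::NonCommutative,
    }
}

/// `plan` is a plan tree linearised from the root down to the leaf
/// `TableScan`. Returns the plan with `MergeScan` pulled up as far as the
/// commutativity rules allow.
fn pull_up_merge_scan(mut plan: Vec<PlanNode>) -> Vec<PlanNode> {
    // Step 1: insert MergeScan right above the bottom TableScan.
    let scan_pos = plan.len() - 1;
    plan.insert(scan_pos, PlanNode::MergeScan);

    // Step 2: walk upwards, swapping MergeScan with commutative parents.
    let mut pos = scan_pos; // current index of MergeScan
    while pos > 0 {
        match commutativity_with_merge_scan(&plan[pos - 1]) {
            Commutativity::Commutative => {
                plan.swap(pos, pos - 1);
                pos -= 1;
            }
            Commutativity::NonCommutative => break,
        }
    }
    plan
}

fn main() {
    // Sort -> Projection -> TableScan, as in tree (a) above.
    let plan = vec![PlanNode::Sort, PlanNode::Projection, PlanNode::TableScan];
    let rewritten = pull_up_merge_scan(plan);
    // MergeScan stops right below Sort, as in tree (c) above.
    assert_eq!(
        rewritten,
        vec![
            PlanNode::Sort,
            PlanNode::MergeScan,
            PlanNode::Projection,
            PlanNode::TableScan,
        ]
    );
}
```
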
# Alternatives
## Spill
If we only consider the ability to process large datasets, we could enable DataFusion's spill ability to temporarily persist intermediate data to disk, like "swap" memory. But this would lead to very slow performance and large write amplification.
# Future Work
As described in the `Motivation` section, we can further explore the distributed planner at the physical execution level, by introducing a mechanism like Spark's shuffle to improve parallelism and reduce intermediate pipeline-breaker stages.
@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "e8abf8241c908448dce595399e89c89a40d048bd" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "6bfb02057c40da0e397c0cb4f6b87bd769669d50" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true
@@ -266,6 +266,7 @@ impl LocalCatalogManager {
|
||||
schema_name: t.schema_name.clone(),
|
||||
table_name: t.table_name.clone(),
|
||||
table_id: t.table_id,
|
||||
region_numbers: vec![0],
|
||||
};
|
||||
let engine = self
|
||||
.engine_manager
|
||||
|
||||
@@ -273,11 +273,7 @@ async fn initiate_schemas(
|
||||
"Fetch schema from metasrv: {}.{}",
|
||||
&catalog_name, &schema_name
|
||||
);
|
||||
increment_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&catalog_name, &schema_name)],
|
||||
);
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
|
||||
|
||||
let backend = backend.clone();
|
||||
let engine_manager = engine_manager.clone();
|
||||
@@ -347,6 +343,43 @@ async fn iter_remote_tables<'a>(
|
||||
}))
|
||||
}
|
||||
|
||||
async fn print_regional_key_debug_info(
|
||||
node_id: u64,
|
||||
backend: KvBackendRef,
|
||||
table_key: &TableGlobalKey,
|
||||
) {
|
||||
let regional_key = TableRegionalKey {
|
||||
catalog_name: table_key.catalog_name.clone(),
|
||||
schema_name: table_key.schema_name.clone(),
|
||||
table_name: table_key.table_name.clone(),
|
||||
node_id,
|
||||
}
|
||||
.to_string();
|
||||
|
||||
match backend.get(regional_key.as_bytes()).await {
|
||||
Ok(Some(Kv(_, values_bytes))) => {
|
||||
debug!(
|
||||
"Node id: {}, TableRegionalKey: {}, value: {},",
|
||||
node_id,
|
||||
table_key,
|
||||
String::from_utf8_lossy(&values_bytes),
|
||||
);
|
||||
}
|
||||
Ok(None) => {
|
||||
debug!(
|
||||
"Node id: {}, TableRegionalKey: {}, value: None",
|
||||
node_id, table_key,
|
||||
);
|
||||
}
|
||||
Err(err) => {
|
||||
debug!(
|
||||
"Node id: {}, failed to fetch TableRegionalKey: {}, source: {}",
|
||||
node_id, regional_key, err
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Initiates all tables inside the catalog by fetching data from metasrv.
|
||||
/// Return maximum table id in the schema.
|
||||
async fn initiate_tables(
|
||||
@@ -367,35 +400,63 @@ async fn initiate_tables(
|
||||
.map(|(table_key, table_value)| {
|
||||
let engine_manager = engine_manager.clone();
|
||||
let schema = schema.clone();
|
||||
let backend = backend.clone();
|
||||
common_runtime::spawn_bg(async move {
|
||||
let table_ref =
|
||||
open_or_create_table(node_id, engine_manager, &table_key, &table_value).await?;
|
||||
let table_info = table_ref.table_info();
|
||||
let table_name = &table_info.name;
|
||||
schema.register_table(table_name.clone(), table_ref).await?;
|
||||
info!("Registered table {}", table_name);
|
||||
Ok(table_info.ident.table_id)
|
||||
match open_or_create_table(node_id, engine_manager, &table_key, &table_value).await
|
||||
{
|
||||
Ok(table_ref) => {
|
||||
let table_info = table_ref.table_info();
|
||||
let table_name = &table_info.name;
|
||||
schema.register_table(table_name.clone(), table_ref).await?;
|
||||
info!("Registered table {}", table_name);
|
||||
Ok(Some(table_info.ident.table_id))
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(
|
||||
"Node id: {}, failed to open table: {}, source: {}",
|
||||
node_id, table_key, err
|
||||
);
|
||||
debug!(
|
||||
"Node id: {}, TableGlobalKey: {}, value: {:?},",
|
||||
node_id, table_key, table_value
|
||||
);
|
||||
print_regional_key_debug_info(node_id, backend, &table_key).await;
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let max_table_id = futures::future::try_join_all(joins)
|
||||
let opened_table_ids = futures::future::try_join_all(joins)
|
||||
.await
|
||||
.context(ParallelOpenTableSnafu)?
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>>>()?
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let opened = opened_table_ids.len();
|
||||
|
||||
let max_table_id = opened_table_ids
|
||||
.into_iter()
|
||||
.max()
|
||||
.unwrap_or(MAX_SYS_TABLE_ID);
|
||||
|
||||
increment_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
table_num as f64,
|
||||
&[crate::metrics::db_label(catalog_name, schema_name)],
|
||||
);
|
||||
info!(
|
||||
"initialized tables in {}.{}, total: {}",
|
||||
catalog_name, schema_name, table_num
|
||||
"initialized tables in {}.{}, total: {}, opened: {}, failed: {}",
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_num,
|
||||
opened,
|
||||
table_num - opened
|
||||
);
|
||||
|
||||
Ok(max_table_id)
|
||||
@@ -431,6 +492,7 @@ async fn open_or_create_table(
|
||||
schema_name: schema_name.clone(),
|
||||
table_name: table_name.clone(),
|
||||
table_id,
|
||||
region_numbers: region_numbers.clone(),
|
||||
};
|
||||
let engine =
|
||||
engine_manager
|
||||
|
||||
@@ -93,6 +93,7 @@ impl SystemCatalogTable {
|
||||
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
|
||||
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
|
||||
table_id: SYSTEM_CATALOG_TABLE_ID,
|
||||
region_numbers: vec![0],
|
||||
};
|
||||
let schema = build_system_catalog_schema();
|
||||
let ctx = EngineContext::default();
|
||||
@@ -508,7 +509,8 @@ mod tests {
|
||||
Arc::new(NoopLogStore::default()),
|
||||
object_store.clone(),
|
||||
noop_compaction_scheduler,
|
||||
),
|
||||
)
|
||||
.unwrap(),
|
||||
object_store,
|
||||
));
|
||||
(dir, table_engine)
|
||||
|
||||
@@ -62,7 +62,8 @@ impl DfTableSourceProvider {
|
||||
TableReference::Bare { .. } => (),
|
||||
TableReference::Partial { schema, .. } => {
|
||||
ensure!(
|
||||
schema.as_ref() == self.default_schema,
|
||||
schema.as_ref() == self.default_schema
|
||||
|| schema.as_ref() == INFORMATION_SCHEMA_NAME,
|
||||
QueryAccessDeniedSnafu {
|
||||
catalog: &self.default_catalog,
|
||||
schema: schema.as_ref(),
|
||||
@@ -74,7 +75,8 @@ impl DfTableSourceProvider {
|
||||
} => {
|
||||
ensure!(
|
||||
catalog.as_ref() == self.default_catalog
|
||||
&& schema.as_ref() == self.default_schema,
|
||||
&& (schema.as_ref() == self.default_schema
|
||||
|| schema.as_ref() == INFORMATION_SCHEMA_NAME),
|
||||
QueryAccessDeniedSnafu {
|
||||
catalog: catalog.as_ref(),
|
||||
schema: schema.as_ref()
|
||||
@@ -191,5 +193,25 @@ mod tests {
|
||||
};
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
assert!(result.is_err());
|
||||
|
||||
let table_ref = TableReference::Partial {
|
||||
schema: Cow::Borrowed("information_schema"),
|
||||
table: Cow::Borrowed("columns"),
|
||||
};
|
||||
assert!(table_provider.resolve_table_ref(table_ref).is_ok());
|
||||
|
||||
let table_ref = TableReference::Full {
|
||||
catalog: Cow::Borrowed("greptime"),
|
||||
schema: Cow::Borrowed("information_schema"),
|
||||
table: Cow::Borrowed("columns"),
|
||||
};
|
||||
assert!(table_provider.resolve_table_ref(table_ref).is_ok());
|
||||
|
||||
let table_ref = TableReference::Full {
|
||||
catalog: Cow::Borrowed("dummy"),
|
||||
schema: Cow::Borrowed("information_schema"),
|
||||
table: Cow::Borrowed("columns"),
|
||||
};
|
||||
assert!(table_provider.resolve_table_ref(table_ref).is_err());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -56,8 +56,9 @@ impl Database {
|
||||
Self {
|
||||
catalog: catalog.into(),
|
||||
schema: schema.into(),
|
||||
dbname: "".to_string(),
|
||||
client,
|
||||
..Default::default()
|
||||
ctx: FlightContext::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -70,9 +71,11 @@ impl Database {
|
||||
/// environment
|
||||
pub fn new_with_dbname(dbname: impl Into<String>, client: Client) -> Self {
|
||||
Self {
|
||||
catalog: "".to_string(),
|
||||
schema: "".to_string(),
|
||||
dbname: dbname.into(),
|
||||
client,
|
||||
..Default::default()
|
||||
ctx: FlightContext::default(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ path = "src/bin/greptime.rs"
|
||||
|
||||
[features]
|
||||
mem-prof = ["tikv-jemallocator", "tikv-jemalloc-ctl"]
|
||||
tokio-console = ["common-telemetry/tokio-console"]
|
||||
|
||||
[dependencies]
|
||||
anymap = "1.0.0-beta.2"
|
||||
@@ -24,6 +25,7 @@ common-recordbatch = { path = "../common/recordbatch" }
|
||||
common-telemetry = { path = "../common/telemetry", features = [
|
||||
"deadlock_detection",
|
||||
] }
|
||||
config = "0.13"
|
||||
datanode = { path = "../datanode" }
|
||||
either = "1.8"
|
||||
frontend = { path = "../frontend" }
|
||||
@@ -36,20 +38,19 @@ query = { path = "../query" }
|
||||
rustyline = "10.1"
|
||||
serde.workspace = true
|
||||
servers = { path = "../servers" }
|
||||
|
||||
session = { path = "../session" }
|
||||
snafu.workspace = true
|
||||
substrait = { path = "../common/substrait" }
|
||||
tikv-jemalloc-ctl = { version = "0.5", optional = true }
|
||||
tikv-jemallocator = { version = "0.5", optional = true }
|
||||
tokio.workspace = true
|
||||
toml = "0.5"
|
||||
|
||||
|
||||
[dev-dependencies]
|
||||
common-test-util = { path = "../common/test-util" }
|
||||
rexpect = "0.5"
|
||||
temp-env = "0.3"
|
||||
serde.workspace = true
|
||||
toml = "0.5"
|
||||
|
||||
[build-dependencies]
|
||||
build-data = "0.1.3"
|
||||
|
||||
@@ -20,7 +20,7 @@ use clap::Parser;
|
||||
use cmd::error::Result;
|
||||
use cmd::options::{Options, TopLevelOptions};
|
||||
use cmd::{cli, datanode, frontend, metasrv, standalone};
|
||||
use common_telemetry::logging::{error, info};
|
||||
use common_telemetry::logging::{error, info, TracingOptions};
|
||||
|
||||
#[derive(Parser)]
|
||||
#[clap(name = "greptimedb", version = print_version())]
|
||||
@@ -31,6 +31,10 @@ struct Command {
|
||||
log_level: Option<String>,
|
||||
#[clap(subcommand)]
|
||||
subcmd: SubCommand,
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
#[clap(long)]
|
||||
tokio_console_addr: Option<String>,
|
||||
}
|
||||
|
||||
pub enum Application {
|
||||
@@ -172,10 +176,14 @@ async fn main() -> Result<()> {
|
||||
|
||||
let opts = cmd.load_options()?;
|
||||
let logging_opts = opts.logging_options();
|
||||
let tracing_opts = TracingOptions {
|
||||
#[cfg(feature = "tokio-console")]
|
||||
tokio_console_addr: cmd.tokio_console_addr.clone(),
|
||||
};
|
||||
|
||||
common_telemetry::set_panic_hook();
|
||||
common_telemetry::init_default_metrics_recorder();
|
||||
let _guard = common_telemetry::init_global_logging(app_name, logging_opts);
|
||||
let _guard = common_telemetry::init_global_logging(app_name, logging_opts, tracing_opts);
|
||||
|
||||
let mut app = cmd.build(opts).await?;
|
||||
|
||||
|
||||
@@ -23,7 +23,6 @@ use snafu::ResultExt;
|
||||
|
||||
use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
|
||||
use crate::options::{Options, TopLevelOptions};
|
||||
use crate::toml_loader;
|
||||
|
||||
pub struct Instance {
|
||||
datanode: Datanode,
|
||||
@@ -87,8 +86,8 @@ struct StartCommand {
|
||||
rpc_hostname: Option<String>,
|
||||
#[clap(long)]
|
||||
mysql_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
metasrv_addr: Option<String>,
|
||||
#[clap(long, multiple = true, value_delimiter = ',')]
|
||||
metasrv_addr: Option<Vec<String>>,
|
||||
#[clap(short, long)]
|
||||
config_file: Option<String>,
|
||||
#[clap(long)]
|
||||
@@ -99,15 +98,17 @@ struct StartCommand {
|
||||
http_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
http_timeout: Option<u64>,
|
||||
#[clap(long, default_value = "GREPTIMEDB_DATANODE")]
|
||||
env_prefix: String,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
let mut opts: DatanodeOptions = if let Some(path) = &self.config_file {
|
||||
toml_loader::from_file!(path)?
|
||||
} else {
|
||||
DatanodeOptions::default()
|
||||
};
|
||||
let mut opts: DatanodeOptions = Options::load_layered_options(
|
||||
self.config_file.as_deref(),
|
||||
self.env_prefix.as_ref(),
|
||||
DatanodeOptions::env_list_keys(),
|
||||
)?;
|
||||
|
||||
if let Some(dir) = top_level_opts.log_dir {
|
||||
opts.logging.dir = dir;
|
||||
@@ -116,30 +117,26 @@ impl StartCommand {
|
||||
opts.logging.level = level;
|
||||
}
|
||||
|
||||
if let Some(addr) = self.rpc_addr.clone() {
|
||||
opts.rpc_addr = addr;
|
||||
if let Some(addr) = &self.rpc_addr {
|
||||
opts.rpc_addr = addr.clone();
|
||||
}
|
||||
|
||||
if self.rpc_hostname.is_some() {
|
||||
opts.rpc_hostname = self.rpc_hostname.clone();
|
||||
}
|
||||
|
||||
if let Some(addr) = self.mysql_addr.clone() {
|
||||
opts.mysql_addr = addr;
|
||||
if let Some(addr) = &self.mysql_addr {
|
||||
opts.mysql_addr = addr.clone();
|
||||
}
|
||||
|
||||
if let Some(node_id) = self.node_id {
|
||||
opts.node_id = Some(node_id);
|
||||
}
|
||||
|
||||
if let Some(meta_addr) = self.metasrv_addr.clone() {
|
||||
if let Some(metasrv_addrs) = &self.metasrv_addr {
|
||||
opts.meta_client_options
|
||||
.get_or_insert_with(MetaClientOptions::default)
|
||||
.metasrv_addrs = meta_addr
|
||||
.split(',')
|
||||
.map(&str::trim)
|
||||
.map(&str::to_string)
|
||||
.collect::<_>();
|
||||
.metasrv_addrs = metasrv_addrs.clone();
|
||||
opts.mode = Mode::Distributed;
|
||||
}
|
||||
|
||||
@@ -150,16 +147,20 @@ impl StartCommand {
|
||||
.fail();
|
||||
}
|
||||
|
||||
if let Some(data_dir) = self.data_dir.clone() {
|
||||
opts.storage.store = ObjectStoreConfig::File(FileConfig { data_dir });
|
||||
if let Some(data_dir) = &self.data_dir {
|
||||
opts.storage.store = ObjectStoreConfig::File(FileConfig {
|
||||
data_dir: data_dir.clone(),
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(wal_dir) = self.wal_dir.clone() {
|
||||
opts.wal.dir = wal_dir;
|
||||
if let Some(wal_dir) = &self.wal_dir {
|
||||
opts.wal.dir = wal_dir.clone();
|
||||
}
|
||||
if let Some(http_addr) = self.http_addr.clone() {
|
||||
opts.http_opts.addr = http_addr
|
||||
|
||||
if let Some(http_addr) = &self.http_addr {
|
||||
opts.http_opts.addr = http_addr.clone();
|
||||
}
|
||||
|
||||
if let Some(http_timeout) = self.http_timeout {
|
||||
opts.http_opts.timeout = Duration::from_secs(http_timeout)
|
||||
}
|
||||
@@ -191,6 +192,7 @@ mod tests {
|
||||
use servers::Mode;
|
||||
|
||||
use super::*;
|
||||
use crate::options::ENV_VAR_SEP;
|
||||
|
||||
#[test]
|
||||
fn test_read_from_config_file() {
|
||||
@@ -232,6 +234,7 @@ mod tests {
|
||||
checkpoint_margin = 9
|
||||
gc_duration = '7s'
|
||||
checkpoint_on_startup = true
|
||||
compress = true
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
@@ -291,6 +294,7 @@ mod tests {
|
||||
checkpoint_margin: Some(9),
|
||||
gc_duration: Some(Duration::from_secs(7)),
|
||||
checkpoint_on_startup: true,
|
||||
compress: true
|
||||
},
|
||||
options.storage.manifest,
|
||||
);
|
||||
@@ -310,7 +314,7 @@ mod tests {
|
||||
|
||||
if let Options::Datanode(opt) = (StartCommand {
|
||||
node_id: Some(42),
|
||||
metasrv_addr: Some("127.0.0.1:3002".to_string()),
|
||||
metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
|
||||
..Default::default()
|
||||
})
|
||||
.load_options(TopLevelOptions::default())
|
||||
@@ -320,7 +324,7 @@ mod tests {
|
||||
}
|
||||
|
||||
assert!((StartCommand {
|
||||
metasrv_addr: Some("127.0.0.1:3002".to_string()),
|
||||
metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
|
||||
..Default::default()
|
||||
})
|
||||
.load_options(TopLevelOptions::default())
|
||||
@@ -350,4 +354,127 @@ mod tests {
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_precedence_order() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
mode = "distributed"
|
||||
enable_memory_catalog = false
|
||||
node_id = 42
|
||||
rpc_addr = "127.0.0.1:3001"
|
||||
rpc_hostname = "127.0.0.1"
|
||||
rpc_runtime_size = 8
|
||||
mysql_addr = "127.0.0.1:4406"
|
||||
mysql_runtime_size = 2
|
||||
|
||||
[meta_client_options]
|
||||
timeout_millis = 3000
|
||||
connect_timeout_millis = 5000
|
||||
tcp_nodelay = true
|
||||
|
||||
[wal]
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
read_batch_size = 128
|
||||
sync_write = false
|
||||
|
||||
[storage]
|
||||
type = "File"
|
||||
data_dir = "/tmp/greptimedb/data/"
|
||||
|
||||
[storage.compaction]
|
||||
max_inflight_tasks = 3
|
||||
max_files_in_level0 = 7
|
||||
max_purge_tasks = 32
|
||||
|
||||
[storage.manifest]
|
||||
checkpoint_on_startup = true
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let env_prefix = "DATANODE_UT";
|
||||
temp_env::with_vars(
|
||||
vec![
|
||||
(
|
||||
// storage.manifest.gc_duration = 9s
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"manifest".to_uppercase(),
|
||||
"gc_duration".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("9s"),
|
||||
),
|
||||
(
|
||||
// storage.compaction.max_purge_tasks = 99
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"compaction".to_uppercase(),
|
||||
"max_purge_tasks".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("99"),
|
||||
),
|
||||
(
|
||||
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"meta_client_options".to_uppercase(),
|
||||
"metasrv_addrs".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
|
||||
),
|
||||
],
|
||||
|| {
|
||||
let command = StartCommand {
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
wal_dir: Some("/other/wal/dir".to_string()),
|
||||
env_prefix: env_prefix.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let Options::Datanode(opts) =
|
||||
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
|
||||
|
||||
// Should be read from env, env > default values.
|
||||
assert_eq!(
|
||||
opts.storage.manifest.gc_duration,
|
||||
Some(Duration::from_secs(9))
|
||||
);
|
||||
assert_eq!(
|
||||
opts.meta_client_options.unwrap().metasrv_addrs,
|
||||
vec![
|
||||
"127.0.0.1:3001".to_string(),
|
||||
"127.0.0.1:3002".to_string(),
|
||||
"127.0.0.1:3003".to_string()
|
||||
]
|
||||
);
|
||||
|
||||
// Should be read from config file, config file > env > default values.
|
||||
assert_eq!(opts.storage.compaction.max_purge_tasks, 32);
|
||||
|
||||
// Should be read from cli, cli > config file > env > default values.
|
||||
assert_eq!(opts.wal.dir, "/other/wal/dir");
|
||||
|
||||
// Should be default value.
|
||||
assert_eq!(
|
||||
opts.storage.manifest.checkpoint_margin,
|
||||
DatanodeOptions::default()
|
||||
.storage
|
||||
.manifest
|
||||
.checkpoint_margin
|
||||
);
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::prelude::*;
|
||||
use config::ConfigError;
|
||||
use rustyline::error::ReadlineError;
|
||||
use snafu::Location;
|
||||
|
||||
@@ -70,12 +71,6 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse config, source: {}", source))]
|
||||
ParseConfig {
|
||||
source: toml::de::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Missing config, msg: {}", msg))]
|
||||
MissingConfig { msg: String, location: Location },
|
||||
|
||||
@@ -153,6 +148,12 @@ pub enum Error {
|
||||
#[snafu(backtrace)]
|
||||
source: substrait::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to load layered config, source: {}", source))]
|
||||
LoadLayeredConfig {
|
||||
source: ConfigError,
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -168,13 +169,12 @@ impl ErrorExt for Error {
|
||||
Error::ShutdownMetaServer { source } => source.status_code(),
|
||||
Error::BuildMetaServer { source } => source.status_code(),
|
||||
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
|
||||
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
Error::IllegalConfig { .. } | Error::InvalidReplCommand { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
|
||||
Error::ReadConfig { .. }
|
||||
| Error::MissingConfig { .. }
|
||||
| Error::LoadLayeredConfig { .. }
|
||||
| Error::IllegalConfig { .. }
|
||||
| Error::InvalidReplCommand { .. }
|
||||
| Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
|
||||
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
|
||||
Error::RequestDatabase { source, .. } => source.status_code(),
|
||||
Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => {
|
||||
|
||||
@@ -17,13 +17,8 @@ use std::sync::Arc;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::grpc::GrpcOptions;
|
||||
use frontend::influxdb::InfluxdbOptions;
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance};
|
||||
use frontend::mysql::MysqlOptions;
|
||||
use frontend::opentsdb::OpentsdbOptions;
|
||||
use frontend::postgres::PostgresOptions;
|
||||
use frontend::prom::PromOptions;
|
||||
use frontend::service_config::{InfluxdbOptions, PromOptions};
|
||||
use meta_client::MetaClientOptions;
|
||||
use servers::auth::UserProviderRef;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
@@ -32,7 +27,6 @@ use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, IllegalAuthConfigSnafu, Result};
|
||||
use crate::options::{Options, TopLevelOptions};
|
||||
use crate::toml_loader;
|
||||
|
||||
pub struct Instance {
|
||||
frontend: FeInstance,
|
||||
@@ -89,7 +83,7 @@ impl SubCommand {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub struct StartCommand {
|
||||
#[clap(long)]
|
||||
http_addr: Option<String>,
|
||||
@@ -107,8 +101,8 @@ pub struct StartCommand {
|
||||
config_file: Option<String>,
|
||||
#[clap(short, long)]
|
||||
influxdb_enable: Option<bool>,
|
||||
#[clap(long)]
|
||||
metasrv_addr: Option<String>,
|
||||
#[clap(long, multiple = true, value_delimiter = ',')]
|
||||
metasrv_addr: Option<Vec<String>>,
|
||||
#[clap(long)]
|
||||
tls_mode: Option<TlsMode>,
|
||||
#[clap(long)]
|
||||
@@ -119,15 +113,17 @@ pub struct StartCommand {
|
||||
user_provider: Option<String>,
|
||||
#[clap(long)]
|
||||
disable_dashboard: Option<bool>,
|
||||
#[clap(long, default_value = "GREPTIMEDB_FRONTEND")]
|
||||
env_prefix: String,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
let mut opts: FrontendOptions = if let Some(path) = &self.config_file {
|
||||
toml_loader::from_file!(path)?
|
||||
} else {
|
||||
FrontendOptions::default()
|
||||
};
|
||||
let mut opts: FrontendOptions = Options::load_layered_options(
|
||||
self.config_file.as_deref(),
|
||||
self.env_prefix.as_ref(),
|
||||
FrontendOptions::env_list_keys(),
|
||||
)?;
|
||||
|
||||
if let Some(dir) = top_level_opts.log_dir {
|
||||
opts.logging.dir = dir;
|
||||
@@ -136,14 +132,16 @@ impl StartCommand {
|
||||
opts.logging.level = level;
|
||||
}
|
||||
|
||||
let tls_option = TlsOption::new(
|
||||
let tls_opts = TlsOption::new(
|
||||
self.tls_mode.clone(),
|
||||
self.tls_cert_path.clone(),
|
||||
self.tls_key_path.clone(),
|
||||
);
|
||||
|
||||
if let Some(addr) = self.http_addr.clone() {
|
||||
opts.http_options.get_or_insert_with(Default::default).addr = addr;
|
||||
if let Some(addr) = &self.http_addr {
|
||||
if let Some(http_opts) = &mut opts.http_options {
|
||||
http_opts.addr = addr.clone()
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(disable_dashboard) = self.disable_dashboard {
|
||||
@@ -152,47 +150,44 @@ impl StartCommand {
|
||||
.disable_dashboard = disable_dashboard;
|
||||
}
|
||||
|
||||
if let Some(addr) = self.grpc_addr.clone() {
|
||||
opts.grpc_options = Some(GrpcOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
if let Some(addr) = &self.grpc_addr {
|
||||
if let Some(grpc_opts) = &mut opts.grpc_options {
|
||||
grpc_opts.addr = addr.clone()
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(addr) = self.mysql_addr.clone() {
|
||||
opts.mysql_options = Some(MysqlOptions {
|
||||
addr,
|
||||
tls: tls_option.clone(),
|
||||
..Default::default()
|
||||
});
|
||||
if let Some(addr) = &self.mysql_addr {
|
||||
if let Some(mysql_opts) = &mut opts.mysql_options {
|
||||
mysql_opts.addr = addr.clone();
|
||||
mysql_opts.tls = tls_opts.clone();
|
||||
}
|
||||
}
|
||||
if let Some(addr) = self.prom_addr.clone() {
|
||||
opts.prom_options = Some(PromOptions { addr });
|
||||
|
||||
if let Some(addr) = &self.prom_addr {
|
||||
opts.prom_options = Some(PromOptions { addr: addr.clone() });
|
||||
}
|
||||
if let Some(addr) = self.postgres_addr.clone() {
|
||||
opts.postgres_options = Some(PostgresOptions {
|
||||
addr,
|
||||
tls: tls_option,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
if let Some(addr) = &self.postgres_addr {
|
||||
if let Some(postgres_opts) = &mut opts.postgres_options {
|
||||
postgres_opts.addr = addr.clone();
|
||||
postgres_opts.tls = tls_opts;
|
||||
}
|
||||
}
|
||||
if let Some(addr) = self.opentsdb_addr.clone() {
|
||||
opts.opentsdb_options = Some(OpentsdbOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
if let Some(addr) = &self.opentsdb_addr {
|
||||
if let Some(opentsdb_addr) = &mut opts.opentsdb_options {
|
||||
opentsdb_addr.addr = addr.clone();
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(enable) = self.influxdb_enable {
|
||||
opts.influxdb_options = Some(InfluxdbOptions { enable });
|
||||
}
|
||||
if let Some(metasrv_addr) = self.metasrv_addr.clone() {
|
||||
|
||||
if let Some(metasrv_addrs) = &self.metasrv_addr {
|
||||
opts.meta_client_options
|
||||
.get_or_insert_with(MetaClientOptions::default)
|
||||
.metasrv_addrs = metasrv_addr
|
||||
.split(',')
|
||||
.map(&str::trim)
|
||||
.map(&str::to_string)
|
||||
.collect::<Vec<_>>();
|
||||
.metasrv_addrs = metasrv_addrs.clone();
|
||||
opts.mode = Mode::Distributed;
|
||||
}
|
||||
|
||||
@@ -231,27 +226,23 @@ mod tests {
|
||||
use std::time::Duration;
|
||||
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use frontend::service_config::GrpcOptions;
|
||||
use servers::auth::{Identity, Password, UserProviderRef};
|
||||
|
||||
use super::*;
|
||||
use crate::options::ENV_VAR_SEP;
|
||||
|
||||
#[test]
|
||||
fn test_try_from_start_command() {
|
||||
let command = StartCommand {
|
||||
http_addr: Some("127.0.0.1:1234".to_string()),
|
||||
grpc_addr: None,
|
||||
prom_addr: Some("127.0.0.1:4444".to_string()),
|
||||
mysql_addr: Some("127.0.0.1:5678".to_string()),
|
||||
postgres_addr: Some("127.0.0.1:5432".to_string()),
|
||||
opentsdb_addr: Some("127.0.0.1:4321".to_string()),
|
||||
influxdb_enable: Some(false),
|
||||
config_file: None,
|
||||
metasrv_addr: None,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: None,
|
||||
disable_dashboard: Some(false),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let Options::Frontend(opts) =
|
||||
@@ -307,20 +298,9 @@ mod tests {
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let command = StartCommand {
|
||||
http_addr: None,
|
||||
grpc_addr: None,
|
||||
mysql_addr: None,
|
||||
prom_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
influxdb_enable: None,
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
metasrv_addr: None,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: None,
|
||||
disable_dashboard: Some(false),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let Options::Frontend(fe_opts) =
|
||||
@@ -342,20 +322,9 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_try_from_start_command_to_anymap() {
|
||||
let command = StartCommand {
|
||||
http_addr: None,
|
||||
grpc_addr: None,
|
||||
mysql_addr: None,
|
||||
prom_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
influxdb_enable: None,
|
||||
config_file: None,
|
||||
metasrv_addr: None,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
disable_dashboard: Some(false),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let plugins = load_frontend_plugins(&command.user_provider);
|
||||
@@ -377,20 +346,8 @@ mod tests {
|
||||
#[test]
|
||||
fn test_top_level_options() {
|
||||
let cmd = StartCommand {
|
||||
http_addr: None,
|
||||
grpc_addr: None,
|
||||
mysql_addr: None,
|
||||
prom_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
influxdb_enable: None,
|
||||
config_file: None,
|
||||
metasrv_addr: None,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: None,
|
||||
disable_dashboard: Some(false),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let options = cmd
|
||||
@@ -404,4 +361,114 @@ mod tests {
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_precedence_order() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
mode = "distributed"
|
||||
|
||||
[http_options]
|
||||
addr = "127.0.0.1:4000"
|
||||
|
||||
[meta_client_options]
|
||||
timeout_millis = 3000
|
||||
connect_timeout_millis = 5000
|
||||
tcp_nodelay = true
|
||||
|
||||
[mysql_options]
|
||||
addr = "127.0.0.1:4002"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let env_prefix = "FRONTEND_UT";
|
||||
temp_env::with_vars(
|
||||
vec![
|
||||
(
|
||||
// mysql_options.addr = 127.0.0.1:14002
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"mysql_options".to_uppercase(),
|
||||
"addr".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:14002"),
|
||||
),
|
||||
(
|
||||
// mysql_options.runtime_size = 11
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"mysql_options".to_uppercase(),
|
||||
"runtime_size".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("11"),
|
||||
),
|
||||
(
|
||||
// http_options.addr = 127.0.0.1:24000
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"http_options".to_uppercase(),
|
||||
"addr".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:24000"),
|
||||
),
|
||||
(
|
||||
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"meta_client_options".to_uppercase(),
|
||||
"metasrv_addrs".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
|
||||
),
|
||||
],
|
||||
|| {
|
||||
let command = StartCommand {
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
http_addr: Some("127.0.0.1:14000".to_string()),
|
||||
env_prefix: env_prefix.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let top_level_opts = TopLevelOptions {
|
||||
log_dir: None,
|
||||
log_level: Some("error".to_string()),
|
||||
};
|
||||
let Options::Frontend(fe_opts) =
|
||||
command.load_options(top_level_opts).unwrap() else {unreachable!()};
|
||||
|
||||
// Should be read from env, env > default values.
|
||||
assert_eq!(fe_opts.mysql_options.as_ref().unwrap().runtime_size, 11);
|
||||
assert_eq!(
|
||||
fe_opts.meta_client_options.unwrap().metasrv_addrs,
|
||||
vec![
|
||||
"127.0.0.1:3001".to_string(),
|
||||
"127.0.0.1:3002".to_string(),
|
||||
"127.0.0.1:3003".to_string()
|
||||
]
|
||||
);
|
||||
|
||||
// Should be read from config file, config file > env > default values.
|
||||
assert_eq!(
|
||||
fe_opts.mysql_options.as_ref().unwrap().addr,
|
||||
"127.0.0.1:4002"
|
||||
);
|
||||
|
||||
// Should be read from cli, cli > config file > env > default values.
|
||||
assert_eq!(
|
||||
fe_opts.http_options.as_ref().unwrap().addr,
|
||||
"127.0.0.1:14000"
|
||||
);
|
||||
|
||||
// Should be default value.
|
||||
assert_eq!(
|
||||
fe_opts.grpc_options.as_ref().unwrap().addr,
|
||||
GrpcOptions::default().addr
|
||||
);
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,4 +21,3 @@ pub mod frontend;
|
||||
pub mod metasrv;
|
||||
pub mod options;
|
||||
pub mod standalone;
|
||||
mod toml_loader;
|
||||
|
||||
@@ -20,9 +20,8 @@ use meta_srv::bootstrap::MetaSrvInstance;
|
||||
use meta_srv::metasrv::MetaSrvOptions;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::error::{self, Result};
|
||||
use crate::options::{Options, TopLevelOptions};
|
||||
use crate::{error, toml_loader};
|
||||
|
||||
pub struct Instance {
|
||||
instance: MetaSrvInstance,
|
||||
@@ -79,7 +78,7 @@ impl SubCommand {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
#[derive(Debug, Default, Parser)]
|
||||
struct StartCommand {
|
||||
#[clap(long)]
|
||||
bind_addr: Option<String>,
|
||||
@@ -97,15 +96,17 @@ struct StartCommand {
|
||||
http_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
http_timeout: Option<u64>,
|
||||
#[clap(long, default_value = "GREPTIMEDB_METASRV")]
|
||||
env_prefix: String,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
|
||||
let mut opts: MetaSrvOptions = if let Some(path) = &self.config_file {
|
||||
toml_loader::from_file!(path)?
|
||||
} else {
|
||||
MetaSrvOptions::default()
|
||||
};
|
||||
let mut opts: MetaSrvOptions = Options::load_layered_options(
|
||||
self.config_file.as_deref(),
|
||||
self.env_prefix.as_ref(),
|
||||
None,
|
||||
)?;
|
||||
|
||||
if let Some(dir) = top_level_opts.log_dir {
|
||||
opts.logging.dir = dir;
|
||||
@@ -114,15 +115,18 @@ impl StartCommand {
|
||||
opts.logging.level = level;
|
||||
}
|
||||
|
||||
if let Some(addr) = self.bind_addr.clone() {
|
||||
opts.bind_addr = addr;
|
||||
if let Some(addr) = &self.bind_addr {
|
||||
opts.bind_addr = addr.clone();
|
||||
}
|
||||
if let Some(addr) = self.server_addr.clone() {
|
||||
opts.server_addr = addr;
|
||||
|
||||
if let Some(addr) = &self.server_addr {
|
||||
opts.server_addr = addr.clone();
|
||||
}
|
||||
if let Some(addr) = self.store_addr.clone() {
|
||||
opts.store_addr = addr;
|
||||
|
||||
if let Some(addr) = &self.store_addr {
|
||||
opts.store_addr = addr.clone();
|
||||
}
|
||||
|
||||
if let Some(selector_type) = &self.selector {
|
||||
opts.selector = selector_type[..]
|
||||
.try_into()
|
||||
@@ -133,9 +137,10 @@ impl StartCommand {
|
||||
opts.use_memory_store = true;
|
||||
}
|
||||
|
||||
if let Some(http_addr) = self.http_addr.clone() {
|
||||
opts.http_opts.addr = http_addr;
|
||||
if let Some(http_addr) = &self.http_addr {
|
||||
opts.http_opts.addr = http_addr.clone();
|
||||
}
|
||||
|
||||
if let Some(http_timeout) = self.http_timeout {
|
||||
opts.http_opts.timeout = Duration::from_secs(http_timeout);
|
||||
}
|
||||
@@ -167,6 +172,7 @@ mod tests {
|
||||
use meta_srv::selector::SelectorType;
|
||||
|
||||
use super::*;
|
||||
use crate::options::ENV_VAR_SEP;
|
||||
|
||||
#[test]
|
||||
fn test_read_from_cmd() {
|
||||
@@ -174,11 +180,8 @@ mod tests {
|
||||
bind_addr: Some("127.0.0.1:3002".to_string()),
|
||||
server_addr: Some("127.0.0.1:3002".to_string()),
|
||||
store_addr: Some("127.0.0.1:2380".to_string()),
|
||||
config_file: None,
|
||||
selector: Some("LoadBased".to_string()),
|
||||
use_memory_store: false,
|
||||
http_addr: None,
|
||||
http_timeout: None,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let Options::Metasrv(options) =
|
||||
@@ -206,14 +209,8 @@ mod tests {
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let cmd = StartCommand {
|
||||
bind_addr: None,
|
||||
server_addr: None,
|
||||
store_addr: None,
|
||||
selector: None,
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
use_memory_store: false,
|
||||
http_addr: None,
|
||||
http_timeout: None,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let Options::Metasrv(options) =
|
||||
@@ -233,11 +230,8 @@ mod tests {
|
||||
bind_addr: Some("127.0.0.1:3002".to_string()),
|
||||
server_addr: Some("127.0.0.1:3002".to_string()),
|
||||
store_addr: Some("127.0.0.1:2380".to_string()),
|
||||
config_file: None,
|
||||
selector: Some("LoadBased".to_string()),
|
||||
use_memory_store: false,
|
||||
http_addr: None,
|
||||
http_timeout: None,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let options = cmd
|
||||
@@ -251,4 +245,72 @@ mod tests {
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_precedence_order() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
server_addr = "127.0.0.1:3002"
|
||||
datanode_lease_secs = 15
|
||||
selector = "LeaseBased"
|
||||
use_memory_store = false
|
||||
|
||||
[http_options]
|
||||
addr = "127.0.0.1:4000"
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let env_prefix = "METASRV_UT";
|
||||
temp_env::with_vars(
|
||||
vec![
|
||||
(
|
||||
// bind_addr = 127.0.0.1:14002
|
||||
vec![env_prefix.to_string(), "bind_addr".to_uppercase()].join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:14002"),
|
||||
),
|
||||
(
|
||||
// server_addr = 127.0.0.1:13002
|
||||
vec![env_prefix.to_string(), "server_addr".to_uppercase()].join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:13002"),
|
||||
),
|
||||
(
|
||||
// http_options.addr = 127.0.0.1:24000
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"http_options".to_uppercase(),
|
||||
"addr".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:24000"),
|
||||
),
|
||||
],
|
||||
|| {
|
||||
let command = StartCommand {
|
||||
http_addr: Some("127.0.0.1:14000".to_string()),
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
env_prefix: env_prefix.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let Options::Metasrv(opts) =
|
||||
command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
|
||||
|
||||
// Should be read from env, env > default values.
|
||||
assert_eq!(opts.bind_addr, "127.0.0.1:14002");
|
||||
|
||||
// Should be read from config file, config file > env > default values.
|
||||
assert_eq!(opts.server_addr, "127.0.0.1:3002");
|
||||
|
||||
// Should be read from cli, cli > config file > env > default values.
|
||||
assert_eq!(opts.http_opts.addr, "127.0.0.1:14000");
|
||||
|
||||
// Should be default value.
|
||||
assert_eq!(opts.store_addr, "127.0.0.1:2379");
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,10 +11,19 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
use config::{Config, Environment, File, FileFormat};
|
||||
use datanode::datanode::DatanodeOptions;
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use meta_srv::metasrv::MetaSrvOptions;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{LoadLayeredConfigSnafu, Result};
|
||||
|
||||
pub const ENV_VAR_SEP: &str = "__";
|
||||
pub const ENV_LIST_SEP: &str = ",";
|
||||
|
||||
pub struct MixOptions {
|
||||
pub fe_opts: FrontendOptions,
|
||||
@@ -30,6 +39,12 @@ pub enum Options {
|
||||
Cli(Box<LoggingOptions>),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct TopLevelOptions {
|
||||
pub log_dir: Option<String>,
|
||||
pub log_level: Option<String>,
|
||||
}
|
||||
|
||||
impl Options {
|
||||
pub fn logging_options(&self) -> &LoggingOptions {
|
||||
match self {
|
||||
@@ -40,10 +55,220 @@ impl Options {
|
||||
Options::Cli(opts) => opts,
|
||||
}
|
||||
}
|
||||
|
||||
/// Load the configuration from multiple sources and merge them.
|
||||
/// The precedence order is: config file > environment variables > default values.
|
||||
/// `env_prefix` is the prefix of environment variables, e.g. "FRONTEND__xxx".
|
||||
/// The function will use dunder (double underscore) `__` as the separator for environment variables, for example:
|
||||
/// `DATANODE__STORAGE__MANIFEST__CHECKPOINT_MARGIN` will be mapped to the `DatanodeOptions.storage.manifest.checkpoint_margin` field in the configuration.
|
||||
/// `list_keys` is the list of keys that should be parsed as a list, for example, you can pass `Some(&["meta_client_options.metasrv_addrs"])` to parse `GREPTIMEDB_METASRV__META_CLIENT_OPTIONS__METASRV_ADDRS` as a list.
|
||||
/// The function will use comma `,` as the separator for list values, for example: `127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003`.
|
||||
pub fn load_layered_options<'de, T: Serialize + Deserialize<'de> + Default>(
|
||||
config_file: Option<&str>,
|
||||
env_prefix: &str,
|
||||
list_keys: Option<&[&str]>,
|
||||
) -> Result<T> {
|
||||
let default_opts = T::default();
|
||||
|
||||
let env_source = {
|
||||
let mut env = Environment::default();
|
||||
|
||||
if !env_prefix.is_empty() {
|
||||
env = env.prefix(env_prefix);
|
||||
}
|
||||
|
||||
if let Some(list_keys) = list_keys {
|
||||
env = env.list_separator(ENV_LIST_SEP);
|
||||
for key in list_keys {
|
||||
env = env.with_list_parse_key(key);
|
||||
}
|
||||
}
|
||||
|
||||
env.try_parsing(true)
|
||||
.separator(ENV_VAR_SEP)
|
||||
.ignore_empty(true)
|
||||
};
|
||||
|
||||
// Add default values and environment variables as the sources of the configuration.
|
||||
let mut layered_config = Config::builder()
|
||||
.add_source(Config::try_from(&default_opts).context(LoadLayeredConfigSnafu)?)
|
||||
.add_source(env_source);
|
||||
|
||||
// Add config file as the source of the configuration if it is specified.
|
||||
if let Some(config_file) = config_file {
|
||||
layered_config = layered_config.add_source(File::new(config_file, FileFormat::Toml));
|
||||
}
|
||||
|
||||
let opts = layered_config
|
||||
.build()
|
||||
.context(LoadLayeredConfigSnafu)?
|
||||
.try_deserialize()
|
||||
.context(LoadLayeredConfigSnafu)?;
|
||||
|
||||
Ok(opts)
|
||||
}
|
||||
}
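A minimal sketch of how a subcommand might call `load_layered_options` as documented above; the TOML path and the `DATANODE_UT` prefix are placeholders, and `Result` here is this crate's `error::Result`.

use datanode::datanode::DatanodeOptions;

// Sketch only: the file path and env prefix are illustrative, not values from this change.
fn load_datanode_opts() -> Result<DatanodeOptions> {
    // With DATANODE_UT__STORAGE__MANIFEST__CHECKPOINT_MARGIN=99 exported, the dunder
    // separator maps the variable onto storage.manifest.checkpoint_margin, while any
    // key that is also present in the TOML file keeps the file's value.
    Options::load_layered_options(
        Some("my_config.toml"),
        "DATANODE_UT",
        DatanodeOptions::env_list_keys(),
    )
}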
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct TopLevelOptions {
|
||||
pub log_dir: Option<String>,
|
||||
pub log_level: Option<String>,
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io::Write;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use datanode::datanode::{DatanodeOptions, ObjectStoreConfig};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_load_layered_options() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
mode = "distributed"
|
||||
enable_memory_catalog = false
|
||||
rpc_addr = "127.0.0.1:3001"
|
||||
rpc_hostname = "127.0.0.1"
|
||||
rpc_runtime_size = 8
|
||||
mysql_addr = "127.0.0.1:4406"
|
||||
mysql_runtime_size = 2
|
||||
|
||||
[meta_client_options]
|
||||
timeout_millis = 3000
|
||||
connect_timeout_millis = 5000
|
||||
tcp_nodelay = true
|
||||
|
||||
[wal]
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
read_batch_size = 128
|
||||
sync_write = false
|
||||
|
||||
[storage.compaction]
|
||||
max_inflight_tasks = 3
|
||||
max_files_in_level0 = 7
|
||||
max_purge_tasks = 32
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let env_prefix = "DATANODE_UT";
|
||||
temp_env::with_vars(
|
||||
// The following environment variables are merged into the configuration; keys that are also present in the config file keep the file's values.
|
||||
vec![
|
||||
(
|
||||
// storage.manifest.checkpoint_margin = 99
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"manifest".to_uppercase(),
|
||||
"checkpoint_margin".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("99"),
|
||||
),
|
||||
(
|
||||
// storage.type = S3
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"type".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("S3"),
|
||||
),
|
||||
(
|
||||
// storage.bucket = mybucket
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"bucket".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("mybucket"),
|
||||
),
|
||||
(
|
||||
// storage.manifest.gc_duration = 42s
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"manifest".to_uppercase(),
|
||||
"gc_duration".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("42s"),
|
||||
),
|
||||
(
|
||||
// storage.manifest.checkpoint_on_startup = true
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"manifest".to_uppercase(),
|
||||
"checkpoint_on_startup".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("true"),
|
||||
),
|
||||
(
|
||||
// wal.dir = /other/wal/dir
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"wal".to_uppercase(),
|
||||
"dir".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("/other/wal/dir"),
|
||||
),
|
||||
(
|
||||
// meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"meta_client_options".to_uppercase(),
|
||||
"metasrv_addrs".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
|
||||
),
|
||||
],
|
||||
|| {
|
||||
let opts: DatanodeOptions = Options::load_layered_options(
|
||||
Some(file.path().to_str().unwrap()),
|
||||
env_prefix,
|
||||
DatanodeOptions::env_list_keys(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Check the configs from environment variables.
|
||||
assert_eq!(opts.storage.manifest.checkpoint_margin, Some(99));
|
||||
match opts.storage.store {
|
||||
ObjectStoreConfig::S3(s3_config) => {
|
||||
assert_eq!(s3_config.bucket, "mybucket".to_string());
|
||||
}
|
||||
_ => panic!("unexpected store type"),
|
||||
}
|
||||
assert_eq!(
|
||||
opts.storage.manifest.gc_duration,
|
||||
Some(Duration::from_secs(42))
|
||||
);
|
||||
assert!(opts.storage.manifest.checkpoint_on_startup);
|
||||
assert_eq!(
|
||||
opts.meta_client_options.unwrap().metasrv_addrs,
|
||||
vec![
|
||||
"127.0.0.1:3001".to_string(),
|
||||
"127.0.0.1:3002".to_string(),
|
||||
"127.0.0.1:3003".to_string()
|
||||
]
|
||||
);
|
||||
|
||||
// Should be the values from config file, not environment variables.
|
||||
assert_eq!(opts.wal.dir, "/tmp/greptimedb/wal".to_string());
|
||||
|
||||
// Should be default values.
|
||||
assert_eq!(opts.node_id, None);
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,14 +21,11 @@ use common_telemetry::logging::LoggingOptions;
|
||||
use datanode::datanode::{Datanode, DatanodeOptions, ProcedureConfig, StorageConfig, WalConfig};
|
||||
use datanode::instance::InstanceRef;
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::grpc::GrpcOptions;
|
||||
use frontend::influxdb::InfluxdbOptions;
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance};
|
||||
use frontend::mysql::MysqlOptions;
|
||||
use frontend::opentsdb::OpentsdbOptions;
|
||||
use frontend::postgres::PostgresOptions;
|
||||
use frontend::prom::PromOptions;
|
||||
use frontend::prometheus::PrometheusOptions;
|
||||
use frontend::service_config::{
|
||||
GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromOptions,
|
||||
PrometheusOptions,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use servers::http::HttpOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
@@ -41,7 +38,6 @@ use crate::error::{
|
||||
};
|
||||
use crate::frontend::load_frontend_plugins;
|
||||
use crate::options::{MixOptions, Options, TopLevelOptions};
|
||||
use crate::toml_loader;
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Command {
|
||||
@@ -184,7 +180,7 @@ impl Instance {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
#[derive(Debug, Default, Parser)]
|
||||
struct StartCommand {
|
||||
#[clap(long)]
|
||||
http_addr: Option<String>,
|
||||
@@ -212,43 +208,45 @@ struct StartCommand {
|
||||
tls_key_path: Option<String>,
|
||||
#[clap(long)]
|
||||
user_provider: Option<String>,
|
||||
#[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
|
||||
env_prefix: String,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
|
||||
let enable_memory_catalog = self.enable_memory_catalog;
|
||||
let config_file = &self.config_file;
|
||||
let mut opts: StandaloneOptions = if let Some(path) = config_file {
|
||||
toml_loader::from_file!(path)?
|
||||
} else {
|
||||
StandaloneOptions::default()
|
||||
};
|
||||
let mut opts: StandaloneOptions = Options::load_layered_options(
|
||||
self.config_file.as_deref(),
|
||||
self.env_prefix.as_ref(),
|
||||
None,
|
||||
)?;
|
||||
|
||||
opts.enable_memory_catalog = enable_memory_catalog;
|
||||
opts.enable_memory_catalog = self.enable_memory_catalog;
|
||||
|
||||
let mut fe_opts = opts.clone().frontend_options();
|
||||
let mut logging = opts.logging.clone();
|
||||
let dn_opts = opts.datanode_options();
|
||||
opts.mode = Mode::Standalone;
|
||||
|
||||
if let Some(dir) = top_level_options.log_dir {
|
||||
logging.dir = dir;
|
||||
opts.logging.dir = dir;
|
||||
}
|
||||
if let Some(level) = top_level_options.log_level {
|
||||
logging.level = level;
|
||||
opts.logging.level = level;
|
||||
}
|
||||
|
||||
fe_opts.mode = Mode::Standalone;
|
||||
let tls_opts = TlsOption::new(
|
||||
self.tls_mode.clone(),
|
||||
self.tls_cert_path.clone(),
|
||||
self.tls_key_path.clone(),
|
||||
);
|
||||
|
||||
if let Some(addr) = self.http_addr.clone() {
|
||||
fe_opts.http_options = Some(HttpOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
if let Some(addr) = &self.http_addr {
|
||||
if let Some(http_opts) = &mut opts.http_options {
|
||||
http_opts.addr = addr.clone()
|
||||
}
|
||||
}
|
||||
if let Some(addr) = self.rpc_addr.clone() {
|
||||
|
||||
if let Some(addr) = &self.rpc_addr {
|
||||
// frontend grpc addr conflict with datanode default grpc addr
|
||||
let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
|
||||
if addr == datanode_grpc_addr {
|
||||
if addr.eq(&datanode_grpc_addr) {
|
||||
return IllegalConfigSnafu {
|
||||
msg: format!(
|
||||
"gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
|
||||
@@ -256,56 +254,42 @@ impl StartCommand {
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
fe_opts.grpc_options = Some(GrpcOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
if let Some(grpc_opts) = &mut opts.grpc_options {
|
||||
grpc_opts.addr = addr.clone()
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(addr) = self.mysql_addr.clone() {
|
||||
fe_opts.mysql_options = Some(MysqlOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
})
|
||||
if let Some(addr) = &self.mysql_addr {
|
||||
if let Some(mysql_opts) = &mut opts.mysql_options {
|
||||
mysql_opts.addr = addr.clone();
|
||||
mysql_opts.tls = tls_opts.clone();
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(addr) = self.prom_addr.clone() {
|
||||
fe_opts.prom_options = Some(PromOptions { addr })
|
||||
if let Some(addr) = &self.prom_addr {
|
||||
opts.prom_options = Some(PromOptions { addr: addr.clone() })
|
||||
}
|
||||
|
||||
if let Some(addr) = self.postgres_addr.clone() {
|
||||
fe_opts.postgres_options = Some(PostgresOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
})
|
||||
if let Some(addr) = &self.postgres_addr {
|
||||
if let Some(postgres_opts) = &mut opts.postgres_options {
|
||||
postgres_opts.addr = addr.clone();
|
||||
postgres_opts.tls = tls_opts;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(addr) = self.opentsdb_addr.clone() {
|
||||
fe_opts.opentsdb_options = Some(OpentsdbOptions {
|
||||
addr,
|
||||
..Default::default()
|
||||
});
|
||||
if let Some(addr) = &self.opentsdb_addr {
|
||||
if let Some(opentsdb_addr) = &mut opts.opentsdb_options {
|
||||
opentsdb_addr.addr = addr.clone();
|
||||
}
|
||||
}
|
||||
|
||||
if self.influxdb_enable {
|
||||
fe_opts.influxdb_options = Some(InfluxdbOptions { enable: true });
|
||||
opts.influxdb_options = Some(InfluxdbOptions { enable: true });
|
||||
}
|
||||
|
||||
let tls_option = TlsOption::new(
|
||||
self.tls_mode.clone(),
|
||||
self.tls_cert_path.clone(),
|
||||
self.tls_key_path.clone(),
|
||||
);
|
||||
|
||||
if let Some(mut mysql_options) = fe_opts.mysql_options {
|
||||
mysql_options.tls = tls_option.clone();
|
||||
fe_opts.mysql_options = Some(mysql_options);
|
||||
}
|
||||
|
||||
if let Some(mut postgres_options) = fe_opts.postgres_options {
|
||||
postgres_options.tls = tls_option;
|
||||
fe_opts.postgres_options = Some(postgres_options);
|
||||
}
|
||||
let fe_opts = opts.clone().frontend_options();
|
||||
let logging = opts.logging.clone();
|
||||
let dn_opts = opts.datanode_options();
|
||||
|
||||
Ok(Options::Standalone(Box::new(MixOptions {
|
||||
fe_opts,
|
||||
@@ -351,6 +335,7 @@ async fn build_frontend(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::default::Default;
|
||||
use std::io::Write;
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -359,23 +344,13 @@ mod tests {
|
||||
use servers::Mode;
|
||||
|
||||
use super::*;
|
||||
use crate::options::ENV_VAR_SEP;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_try_from_start_command_to_anymap() {
|
||||
let command = StartCommand {
|
||||
http_addr: None,
|
||||
rpc_addr: None,
|
||||
prom_addr: None,
|
||||
mysql_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
config_file: None,
|
||||
influxdb_enable: false,
|
||||
enable_memory_catalog: false,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let plugins = load_frontend_plugins(&command.user_provider);
|
||||
@@ -441,19 +416,9 @@ mod tests {
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
let cmd = StartCommand {
|
||||
http_addr: None,
|
||||
rpc_addr: None,
|
||||
prom_addr: None,
|
||||
mysql_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
influxdb_enable: false,
|
||||
enable_memory_catalog: false,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let Options::Standalone(options) = cmd.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
|
||||
@@ -504,19 +469,8 @@ mod tests {
|
||||
#[test]
|
||||
fn test_top_level_options() {
|
||||
let cmd = StartCommand {
|
||||
http_addr: None,
|
||||
rpc_addr: None,
|
||||
prom_addr: None,
|
||||
mysql_addr: None,
|
||||
postgres_addr: None,
|
||||
opentsdb_addr: None,
|
||||
config_file: None,
|
||||
influxdb_enable: false,
|
||||
enable_memory_catalog: false,
|
||||
tls_mode: None,
|
||||
tls_cert_path: None,
|
||||
tls_key_path: None,
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let Options::Standalone(opts) = cmd
|
||||
@@ -531,4 +485,88 @@ mod tests {
|
||||
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
|
||||
assert_eq!("debug", opts.logging.level);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_precedence_order() {
|
||||
let mut file = create_named_temp_file();
|
||||
let toml_str = r#"
|
||||
mode = "standalone"
|
||||
|
||||
[http_options]
|
||||
addr = "127.0.0.1:4000"
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
let env_prefix = "STANDALONE_UT";
|
||||
temp_env::with_vars(
|
||||
vec![
|
||||
(
|
||||
// logging.dir = /other/log/dir
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"logging".to_uppercase(),
|
||||
"dir".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("/other/log/dir"),
|
||||
),
|
||||
(
|
||||
// logging.level = info
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"logging".to_uppercase(),
|
||||
"level".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("info"),
|
||||
),
|
||||
(
|
||||
// http_options.addr = 127.0.0.1:24000
|
||||
vec![
|
||||
env_prefix.to_string(),
|
||||
"http_options".to_uppercase(),
|
||||
"addr".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("127.0.0.1:24000"),
|
||||
),
|
||||
],
|
||||
|| {
|
||||
let command = StartCommand {
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
http_addr: Some("127.0.0.1:14000".to_string()),
|
||||
env_prefix: env_prefix.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let top_level_opts = TopLevelOptions {
|
||||
log_dir: None,
|
||||
log_level: None,
|
||||
};
|
||||
let Options::Standalone(opts) =
|
||||
command.load_options(top_level_opts).unwrap() else {unreachable!()};
|
||||
|
||||
// Should be read from env, env > default values.
|
||||
assert_eq!(opts.logging.dir, "/other/log/dir");
|
||||
|
||||
// Should be read from config file, config file > env > default values.
|
||||
assert_eq!(opts.logging.level, "debug");
|
||||
|
||||
// Should be read from cli, cli > config file > env > default values.
|
||||
assert_eq!(
|
||||
opts.fe_opts.http_options.as_ref().unwrap().addr,
|
||||
"127.0.0.1:14000"
|
||||
);
|
||||
|
||||
// Should be default value.
|
||||
assert_eq!(
|
||||
opts.fe_opts.grpc_options.unwrap().addr,
|
||||
GrpcOptions::default().addr
|
||||
);
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,94 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
macro_rules! from_file {
|
||||
($path: expr) => {
|
||||
toml::from_str(
|
||||
&std::fs::read_to_string($path)
|
||||
.context(crate::error::ReadConfigSnafu { path: $path })?,
|
||||
)
|
||||
.context(crate::error::ParseConfigSnafu)
|
||||
};
|
||||
}
|
||||
|
||||
pub(crate) use from_file;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Result;
|
||||
|
||||
#[derive(Clone, PartialEq, Debug, Deserialize, Serialize)]
|
||||
#[serde(default)]
|
||||
struct MockConfig {
|
||||
path: String,
|
||||
port: u32,
|
||||
host: String,
|
||||
}
|
||||
|
||||
impl Default for MockConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
path: "test".to_string(),
|
||||
port: 0,
|
||||
host: "localhost".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from_file() -> Result<()> {
|
||||
let config = MockConfig {
|
||||
path: "/tmp".to_string(),
|
||||
port: 999,
|
||||
host: "greptime.test".to_string(),
|
||||
};
|
||||
|
||||
let dir = create_temp_dir("test_from_file");
|
||||
let test_file = format!("{}/test.toml", dir.path().to_str().unwrap());
|
||||
|
||||
let s = toml::to_string(&config).unwrap();
|
||||
assert!(s.contains("host") && s.contains("path") && s.contains("port"));
|
||||
|
||||
let mut file = File::create(&test_file).unwrap();
|
||||
file.write_all(s.as_bytes()).unwrap();
|
||||
|
||||
let loaded_config: MockConfig = from_file!(&test_file)?;
|
||||
assert_eq!(loaded_config, config);
|
||||
|
||||
// Only host in file
|
||||
let mut file = File::create(&test_file).unwrap();
|
||||
file.write_all("host='greptime.test'\n".as_bytes()).unwrap();
|
||||
|
||||
let loaded_config: MockConfig = from_file!(&test_file)?;
|
||||
assert_eq!(loaded_config.host, "greptime.test");
|
||||
assert_eq!(loaded_config.port, 0);
|
||||
assert_eq!(loaded_config.path, "test");
|
||||
|
||||
// Truncate the file.
|
||||
let file = File::create(&test_file).unwrap();
|
||||
file.set_len(0).unwrap();
|
||||
let loaded_config: MockConfig = from_file!(&test_file)?;
|
||||
assert_eq!(loaded_config, MockConfig::default());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -29,6 +29,7 @@ snafu.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-util.workspace = true
|
||||
url = "2.3"
|
||||
paste = "1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
common-test-util = { path = "../test-util" }
|
||||
|
||||
@@ -17,9 +17,10 @@ use std::io;
|
||||
use std::str::FromStr;
|
||||
|
||||
use async_compression::tokio::bufread::{BzDecoder, GzipDecoder, XzDecoder, ZstdDecoder};
|
||||
use async_compression::tokio::write;
|
||||
use bytes::Bytes;
|
||||
use futures::Stream;
|
||||
use tokio::io::{AsyncRead, BufReader};
|
||||
use tokio::io::{AsyncRead, AsyncWriteExt, BufReader};
|
||||
use tokio_util::io::{ReaderStream, StreamReader};
|
||||
|
||||
use crate::error::{self, Error, Result};
|
||||
@@ -73,37 +74,107 @@ impl CompressionType {
|
||||
!matches!(self, &Self::Uncompressed)
|
||||
}
|
||||
|
||||
pub fn convert_async_read<T: AsyncRead + Unpin + Send + 'static>(
|
||||
&self,
|
||||
s: T,
|
||||
) -> Box<dyn AsyncRead + Unpin + Send> {
|
||||
pub const fn file_extension(&self) -> &'static str {
|
||||
match self {
|
||||
CompressionType::Gzip => Box::new(GzipDecoder::new(BufReader::new(s))),
|
||||
CompressionType::Bzip2 => Box::new(BzDecoder::new(BufReader::new(s))),
|
||||
CompressionType::Xz => Box::new(XzDecoder::new(BufReader::new(s))),
|
||||
CompressionType::Zstd => Box::new(ZstdDecoder::new(BufReader::new(s))),
|
||||
CompressionType::Uncompressed => Box::new(s),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn convert_stream<T: Stream<Item = io::Result<Bytes>> + Unpin + Send + 'static>(
|
||||
&self,
|
||||
s: T,
|
||||
) -> Box<dyn Stream<Item = io::Result<Bytes>> + Send + Unpin> {
|
||||
match self {
|
||||
CompressionType::Gzip => {
|
||||
Box::new(ReaderStream::new(GzipDecoder::new(StreamReader::new(s))))
|
||||
}
|
||||
CompressionType::Bzip2 => {
|
||||
Box::new(ReaderStream::new(BzDecoder::new(StreamReader::new(s))))
|
||||
}
|
||||
CompressionType::Xz => {
|
||||
Box::new(ReaderStream::new(XzDecoder::new(StreamReader::new(s))))
|
||||
}
|
||||
CompressionType::Zstd => {
|
||||
Box::new(ReaderStream::new(ZstdDecoder::new(StreamReader::new(s))))
|
||||
}
|
||||
CompressionType::Uncompressed => Box::new(s),
|
||||
Self::Gzip => "gz",
|
||||
Self::Bzip2 => "bz2",
|
||||
Self::Xz => "xz",
|
||||
Self::Zstd => "zst",
|
||||
Self::Uncompressed => "",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! impl_compression_type {
|
||||
($(($enum_item:ident, $prefix:ident)),*) => {
|
||||
paste::item! {
|
||||
impl CompressionType {
|
||||
pub async fn encode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
|
||||
match self {
|
||||
$(
|
||||
CompressionType::$enum_item => {
|
||||
let mut buffer = Vec::with_capacity(content.as_ref().len());
|
||||
let mut encoder = write::[<$prefix Encoder>]::new(&mut buffer);
|
||||
encoder.write_all(content.as_ref()).await?;
|
||||
encoder.shutdown().await?;
|
||||
Ok(buffer)
|
||||
}
|
||||
)*
|
||||
CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn decode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
|
||||
match self {
|
||||
$(
|
||||
CompressionType::$enum_item => {
|
||||
let mut buffer = Vec::with_capacity(content.as_ref().len() * 2);
|
||||
let mut decoder = write::[<$prefix Decoder>]::new(&mut buffer);
|
||||
decoder.write_all(content.as_ref()).await?;
|
||||
decoder.shutdown().await?;
|
||||
Ok(buffer)
|
||||
}
|
||||
)*
|
||||
CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn convert_async_read<T: AsyncRead + Unpin + Send + 'static>(
|
||||
&self,
|
||||
s: T,
|
||||
) -> Box<dyn AsyncRead + Unpin + Send> {
|
||||
match self {
|
||||
$(CompressionType::$enum_item => Box::new([<$prefix Decoder>]::new(BufReader::new(s))),)*
|
||||
CompressionType::Uncompressed => Box::new(s),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn convert_stream<T: Stream<Item = io::Result<Bytes>> + Unpin + Send + 'static>(
|
||||
&self,
|
||||
s: T,
|
||||
) -> Box<dyn Stream<Item = io::Result<Bytes>> + Send + Unpin> {
|
||||
match self {
|
||||
$(CompressionType::$enum_item => Box::new(ReaderStream::new([<$prefix Decoder>]::new(StreamReader::new(s)))),)*
|
||||
CompressionType::Uncompressed => Box::new(s),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::CompressionType;
|
||||
|
||||
$(
|
||||
#[tokio::test]
|
||||
async fn [<test_ $enum_item:lower _compression>]() {
|
||||
let string = "foo_bar".as_bytes().to_vec();
|
||||
let compress = CompressionType::$enum_item
|
||||
.encode(&string)
|
||||
.await
|
||||
.unwrap();
|
||||
let decompress = CompressionType::$enum_item
|
||||
.decode(&compress)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(decompress, string);
|
||||
})*
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_uncompression() {
|
||||
let string = "foo_bar".as_bytes().to_vec();
|
||||
let compress = CompressionType::Uncompressed
|
||||
.encode(&string)
|
||||
.await
|
||||
.unwrap();
|
||||
let decompress = CompressionType::Uncompressed
|
||||
.decode(&compress)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(decompress, string);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl_compression_type!((Gzip, Gzip), (Bzip2, Bz), (Xz, Xz), (Zstd, Zstd));
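A minimal sketch of the API generated by `impl_compression_type!`: each variant gets async `encode`/`decode` plus the reader/stream converters, and `file_extension` returns the conventional suffix. The module path in the `use` line is an assumption, not taken from this change.

use crate::compression::CompressionType;

// Sketch only: round-trips a buffer through one of the generated codecs.
async fn roundtrip_example() -> std::io::Result<()> {
    let raw = b"foo_bar".to_vec();
    let compressed = CompressionType::Zstd.encode(&raw).await?; // generated by the macro
    let restored = CompressionType::Zstd.decode(&compressed).await?;
    assert_eq!(restored, raw);
    assert_eq!(CompressionType::Zstd.file_extension(), "zst"); // conventional suffix
    Ok(())
}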
|
||||
|
||||
@@ -8,6 +8,8 @@ license.workspace = true
|
||||
proc-macro = true
|
||||
|
||||
[dependencies]
|
||||
common-telemetry = { path = "../telemetry" }
|
||||
backtrace = "0.3"
|
||||
quote = "1.0"
|
||||
syn = "1.0"
|
||||
proc-macro2 = "1.0"
|
||||
|
||||
@@ -15,11 +15,13 @@
|
||||
mod range_fn;
|
||||
|
||||
use proc_macro::TokenStream;
|
||||
use quote::{quote, quote_spanned};
|
||||
use quote::{quote, quote_spanned, ToTokens};
|
||||
use range_fn::process_range_fn;
|
||||
use syn::parse::Parser;
|
||||
use syn::spanned::Spanned;
|
||||
use syn::{parse_macro_input, DeriveInput, ItemStruct};
|
||||
use syn::{
|
||||
parse_macro_input, AttributeArgs, DeriveInput, ItemFn, ItemStruct, Lit, Meta, NestedMeta,
|
||||
};
|
||||
|
||||
/// Make struct implemented trait [AggrFuncTypeStore], which is necessary when writing UDAF.
|
||||
/// This derive macro is expected to be used along with attribute macro [as_aggr_func_creator].
|
||||
@@ -114,3 +116,109 @@ pub fn as_aggr_func_creator(_args: TokenStream, input: TokenStream) -> TokenStre
|
||||
pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
process_range_fn(args, input)
|
||||
}
|
||||
|
||||
/// Attribute macro to print the caller to the annotated function.
|
||||
/// The caller is printed as its filename and the call site line number.
|
||||
///
|
||||
/// This macro works like this: it injects the tracking code as the first statement of the annotated
|
||||
/// function body. The tracking code uses [backtrace-rs](https://crates.io/crates/backtrace) to get
|
||||
/// the callers. So you must depend on the `backtrace` crate.
|
||||
///
|
||||
/// # Arguments
|
||||
/// - `depth`: The max depth of call stack to print. Optional, defaults to 1.
|
||||
///
|
||||
/// # Example
|
||||
/// ```rust, ignore
|
||||
///
|
||||
/// #[print_caller(depth = 3)]
|
||||
/// fn foo() {}
|
||||
/// ```
|
||||
#[proc_macro_attribute]
|
||||
pub fn print_caller(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
let mut depth = 1;
|
||||
|
||||
let args = parse_macro_input!(args as AttributeArgs);
|
||||
for meta in args.iter() {
|
||||
if let NestedMeta::Meta(Meta::NameValue(name_value)) = meta {
|
||||
let ident = name_value
|
||||
.path
|
||||
.get_ident()
|
||||
.expect("Expected an ident!")
|
||||
.to_string();
|
||||
if ident == "depth" {
|
||||
let Lit::Int(i) = &name_value.lit else { panic!("Expected 'depth' to be a valid int!") };
|
||||
depth = i.base10_parse::<usize>().expect("Invalid 'depth' value");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let tokens: TokenStream = quote! {
|
||||
{
|
||||
let curr_file = file!();
|
||||
|
||||
let bt = backtrace::Backtrace::new();
|
||||
let call_stack = bt
|
||||
.frames()
|
||||
.iter()
|
||||
.skip_while(|f| {
|
||||
!f.symbols().iter().any(|s| {
|
||||
s.filename()
|
||||
.map(|p| p.ends_with(curr_file))
|
||||
.unwrap_or(false)
|
||||
})
|
||||
})
|
||||
.skip(1)
|
||||
.take(#depth);
|
||||
|
||||
let call_stack = call_stack
|
||||
.map(|f| {
|
||||
f.symbols()
|
||||
.iter()
|
||||
.map(|s| {
|
||||
let filename = s
|
||||
.filename()
|
||||
.map(|p| format!("{:?}", p))
|
||||
.unwrap_or_else(|| "unknown".to_string());
|
||||
|
||||
let lineno = s
|
||||
.lineno()
|
||||
.map(|l| format!("{}", l))
|
||||
.unwrap_or_else(|| "unknown".to_string());
|
||||
|
||||
format!("filename: {}, lineno: {}", filename, lineno)
|
||||
})
|
||||
.collect::<Vec<String>>()
|
||||
.join(", ")
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
match call_stack.len() {
|
||||
0 => common_telemetry::info!("unable to find call stack"),
|
||||
1 => common_telemetry::info!("caller: {}", call_stack[0]),
|
||||
_ => {
|
||||
let mut s = String::new();
|
||||
s.push_str("[\n");
|
||||
for e in call_stack {
|
||||
s.push_str("\t");
|
||||
s.push_str(&e);
|
||||
s.push_str("\n");
|
||||
}
|
||||
s.push_str("]");
|
||||
common_telemetry::info!("call stack: {}", s)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
.into();
|
||||
|
||||
let stmt = match syn::parse(tokens) {
|
||||
Ok(stmt) => stmt,
|
||||
Err(e) => return e.into_compile_error().into(),
|
||||
};
|
||||
|
||||
let mut item = parse_macro_input!(input as ItemFn);
|
||||
item.block.stmts.insert(0, stmt);
|
||||
|
||||
item.into_token_stream().into()
|
||||
}
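A minimal usage sketch for `print_caller`; the import path assumes the proc-macro crate whose manifest is shown above, and `flush_region` is a made-up function name.

// Sketch only. The annotated crate must itself depend on `backtrace` and
// `common_telemetry`, because the injected prologue calls into both.
use common_function_macro::print_caller;

#[print_caller(depth = 2)]
fn flush_region() {
    // Each call walks the backtrace and logs a line such as
    // `caller: filename: "...", lineno: ...` through common_telemetry::info!.
}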
|
||||
|
||||
@@ -8,11 +8,14 @@ license.workspace = true
|
||||
api = { path = "../../api" }
|
||||
arrow-flight.workspace = true
|
||||
async-trait = "0.1"
|
||||
backtrace = "0.3"
|
||||
common-base = { path = "../base" }
|
||||
common-error = { path = "../error" }
|
||||
common-function-macro = { path = "../function-macro" }
|
||||
common-query = { path = "../query" }
|
||||
common-recordbatch = { path = "../recordbatch" }
|
||||
common-runtime = { path = "../runtime" }
|
||||
common-telemetry = { path = "../telemetry" }
|
||||
dashmap = "5.4"
|
||||
datafusion.workspace = true
|
||||
datatypes = { path = "../../datatypes" }
|
||||
|
||||
@@ -16,6 +16,7 @@ use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_telemetry::info;
|
||||
use dashmap::mapref::entry::Entry;
|
||||
use dashmap::DashMap;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
@@ -33,6 +34,7 @@ pub struct ChannelManager {
|
||||
config: ChannelConfig,
|
||||
client_tls_config: Option<ClientTlsConfig>,
|
||||
pool: Arc<Pool>,
|
||||
channel_recycle_started: bool,
|
||||
}
|
||||
|
||||
impl Default for ChannelManager {
|
||||
@@ -48,19 +50,28 @@ impl ChannelManager {
|
||||
|
||||
pub fn with_config(config: ChannelConfig) -> Self {
|
||||
let pool = Arc::new(Pool::default());
|
||||
let cloned_pool = pool.clone();
|
||||
|
||||
common_runtime::spawn_bg(async {
|
||||
recycle_channel_in_loop(cloned_pool, RECYCLE_CHANNEL_INTERVAL_SECS).await;
|
||||
});
|
||||
|
||||
Self {
|
||||
config,
|
||||
client_tls_config: None,
|
||||
pool,
|
||||
channel_recycle_started: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn start_channel_recycle(&mut self) {
|
||||
if self.channel_recycle_started {
|
||||
return;
|
||||
}
|
||||
|
||||
let pool = self.pool.clone();
|
||||
common_runtime::spawn_bg(async {
|
||||
recycle_channel_in_loop(pool, RECYCLE_CHANNEL_INTERVAL_SECS).await;
|
||||
});
|
||||
info!("Channel recycle is started, running in the background!");
|
||||
|
||||
self.channel_recycle_started = true;
|
||||
}
|
||||
|
||||
pub fn with_tls_config(config: ChannelConfig) -> Result<Self> {
|
||||
let mut cm = Self::with_config(config.clone());
|
||||
|
||||
@@ -224,8 +235,8 @@ pub struct ChannelConfig {
|
||||
impl Default for ChannelConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
timeout: None,
|
||||
connect_timeout: None,
|
||||
timeout: Some(Duration::from_secs(2)),
|
||||
connect_timeout: Some(Duration::from_secs(4)),
|
||||
concurrency_limit: None,
|
||||
rate_limit: None,
|
||||
initial_stream_window_size: None,
|
||||
@@ -455,13 +466,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_access_count() {
|
||||
let pool = Arc::new(Pool::default());
|
||||
let config = ChannelConfig::new();
|
||||
let mgr = Arc::new(ChannelManager {
|
||||
pool,
|
||||
config,
|
||||
client_tls_config: None,
|
||||
});
|
||||
let mgr = Arc::new(ChannelManager::new());
|
||||
let addr = "test_uri";
|
||||
|
||||
let mut joins = Vec::with_capacity(10);
|
||||
@@ -491,8 +496,8 @@ mod tests {
|
||||
let default_cfg = ChannelConfig::new();
|
||||
assert_eq!(
|
||||
ChannelConfig {
|
||||
timeout: None,
|
||||
connect_timeout: None,
|
||||
timeout: Some(Duration::from_secs(2)),
|
||||
connect_timeout: Some(Duration::from_secs(4)),
|
||||
concurrency_limit: None,
|
||||
rate_limit: None,
|
||||
initial_stream_window_size: None,
|
||||
@@ -553,7 +558,6 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_build_endpoint() {
|
||||
let pool = Arc::new(Pool::default());
|
||||
let config = ChannelConfig::new()
|
||||
.timeout(Duration::from_secs(3))
|
||||
.connect_timeout(Duration::from_secs(5))
|
||||
@@ -567,11 +571,7 @@ mod tests {
|
||||
.http2_adaptive_window(true)
|
||||
.tcp_keepalive(Duration::from_secs(2))
|
||||
.tcp_nodelay(true);
|
||||
let mgr = ChannelManager {
|
||||
pool,
|
||||
config,
|
||||
client_tls_config: None,
|
||||
};
|
||||
let mgr = ChannelManager::with_config(config);
|
||||
|
||||
let res = mgr.build_endpoint("test_addr");
|
||||
|
||||
@@ -580,18 +580,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_channel_with_connector() {
|
||||
let pool = Pool {
|
||||
channels: DashMap::default(),
|
||||
};
|
||||
|
||||
let pool = Arc::new(pool);
|
||||
|
||||
let config = ChannelConfig::new();
|
||||
let mgr = ChannelManager {
|
||||
pool,
|
||||
config,
|
||||
client_tls_config: None,
|
||||
};
|
||||
let mgr = ChannelManager::new();
|
||||
|
||||
let addr = "test_addr";
|
||||
let res = mgr.get(addr);
|
||||
|
||||
11
src/common/meta/Cargo.toml
Normal file
@@ -0,0 +1,11 @@
|
||||
[package]
|
||||
name = "common-meta"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-error = { path = "../error" }
|
||||
serde.workspace = true
|
||||
snafu.workspace = true
|
||||
serde_json.workspace = true
|
||||
85
src/common/meta/src/instruction.rs
Normal file
@@ -0,0 +1,85 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct RegionIdent {
|
||||
pub catalog: String,
|
||||
pub schema: String,
|
||||
pub table: String,
|
||||
pub table_id: u32,
|
||||
pub engine: String,
|
||||
pub region_number: u32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
|
||||
pub struct SimpleReply {
|
||||
pub result: bool,
|
||||
pub error: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(tag = "type", rename_all = "snake_case")]
|
||||
pub enum Instruction {
|
||||
OpenRegion(RegionIdent),
|
||||
CloseRegion(RegionIdent),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
|
||||
#[serde(tag = "type", rename_all = "snake_case")]
|
||||
pub enum InstructionReply {
|
||||
OpenRegion(SimpleReply),
|
||||
CloseRegion(SimpleReply),
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_serialize_instruction() {
|
||||
let open_region = Instruction::OpenRegion(RegionIdent {
|
||||
catalog: "foo".to_string(),
|
||||
schema: "bar".to_string(),
|
||||
table: "hi".to_string(),
|
||||
table_id: 1024,
|
||||
engine: "mito".to_string(),
|
||||
region_number: 1,
|
||||
});
|
||||
|
||||
let serialized = serde_json::to_string(&open_region).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
r#"{"type":"open_region","catalog":"foo","schema":"bar","table":"hi","table_id":1024,"engine":"mito","region_number":1}"#,
|
||||
serialized
|
||||
);
|
||||
|
||||
let close_region = Instruction::CloseRegion(RegionIdent {
|
||||
catalog: "foo".to_string(),
|
||||
schema: "bar".to_string(),
|
||||
table: "hi".to_string(),
|
||||
table_id: 1024,
|
||||
engine: "mito".to_string(),
|
||||
region_number: 1,
|
||||
});
|
||||
|
||||
let serialized = serde_json::to_string(&close_region).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
r#"{"type":"close_region","catalog":"foo","schema":"bar","table":"hi","table_id":1024,"engine":"mito","region_number":1}"#,
|
||||
serialized
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -12,5 +12,4 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Instruction {}
|
||||
pub mod instruction;
|
||||
@@ -121,6 +121,18 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Corrupted data, error: {source}"))]
|
||||
CorruptedData { source: FromUtf8Error },
|
||||
|
||||
#[snafu(display("Failed to start the remove_outdated_meta method, error: {}", source))]
|
||||
StartRemoveOutdatedMetaTask {
|
||||
source: common_runtime::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to stop the remove_outdated_meta method, error: {}", source))]
|
||||
StopRemoveOutdatedMetaTask {
|
||||
source: common_runtime::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -145,6 +157,8 @@ impl ErrorExt for Error {
|
||||
}
|
||||
Error::ProcedurePanic { .. } | Error::CorruptedData { .. } => StatusCode::Unexpected,
|
||||
Error::ProcedureExec { source, .. } => source.status_code(),
|
||||
Error::StartRemoveOutdatedMetaTask { source, .. }
|
||||
| Error::StopRemoveOutdatedMetaTask { source, .. } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -21,12 +21,16 @@ use std::time::{Duration, Instant};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use backon::ExponentialBuilder;
|
||||
use common_runtime::{RepeatedTask, TaskFunction};
|
||||
use common_telemetry::logging;
|
||||
use snafu::ensure;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use tokio::sync::watch::{self, Receiver, Sender};
|
||||
use tokio::sync::Notify;
|
||||
|
||||
use crate::error::{DuplicateProcedureSnafu, LoaderConflictSnafu, Result};
|
||||
use crate::error::{
|
||||
DuplicateProcedureSnafu, Error, LoaderConflictSnafu, Result, StartRemoveOutdatedMetaTaskSnafu,
|
||||
StopRemoveOutdatedMetaTaskSnafu,
|
||||
};
|
||||
use crate::local::lock::LockMap;
|
||||
use crate::local::runner::Runner;
|
||||
use crate::procedure::BoxedProcedureLoader;
|
||||
@@ -341,6 +345,8 @@ impl ManagerContext {
|
||||
pub struct ManagerConfig {
|
||||
pub max_retry_times: usize,
|
||||
pub retry_delay: Duration,
|
||||
pub remove_outdated_meta_task_interval: Duration,
|
||||
pub remove_outdated_meta_ttl: Duration,
|
||||
}
|
||||
|
||||
impl Default for ManagerConfig {
|
||||
@@ -348,6 +354,8 @@ impl Default for ManagerConfig {
|
||||
Self {
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
remove_outdated_meta_task_interval: Duration::from_secs(60 * 10),
|
||||
remove_outdated_meta_ttl: META_TTL,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -358,16 +366,26 @@ pub struct LocalManager {
|
||||
state_store: StateStoreRef,
|
||||
max_retry_times: usize,
|
||||
retry_delay: Duration,
|
||||
remove_outdated_meta_task: RepeatedTask<Error>,
|
||||
}
|
||||
|
||||
impl LocalManager {
|
||||
/// Create a new [LocalManager] with specific `config`.
|
||||
pub fn new(config: ManagerConfig, state_store: StateStoreRef) -> LocalManager {
|
||||
let manager_ctx = Arc::new(ManagerContext::new());
|
||||
let remove_outdated_meta_task = RepeatedTask::new(
|
||||
config.remove_outdated_meta_task_interval,
|
||||
Box::new(RemoveOutdatedMetaFunction {
|
||||
manager_ctx: manager_ctx.clone(),
|
||||
ttl: config.remove_outdated_meta_ttl,
|
||||
}),
|
||||
);
|
||||
LocalManager {
|
||||
manager_ctx: Arc::new(ManagerContext::new()),
|
||||
manager_ctx,
|
||||
state_store,
|
||||
max_retry_times: config.max_retry_times,
|
||||
retry_delay: config.retry_delay,
|
||||
remove_outdated_meta_task,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -419,6 +437,21 @@ impl ProcedureManager for LocalManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn start(&self) -> Result<()> {
|
||||
self.remove_outdated_meta_task
|
||||
.start(common_runtime::bg_runtime())
|
||||
.context(StartRemoveOutdatedMetaTaskSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn stop(&self) -> Result<()> {
|
||||
self.remove_outdated_meta_task
|
||||
.stop()
|
||||
.await
|
||||
.context(StopRemoveOutdatedMetaTaskSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn submit(&self, procedure: ProcedureWithId) -> Result<Watcher> {
|
||||
let procedure_id = procedure.id;
|
||||
ensure!(
|
||||
@@ -426,9 +459,6 @@ impl ProcedureManager for LocalManager {
|
||||
DuplicateProcedureSnafu { procedure_id }
|
||||
);
|
||||
|
||||
// TODO(yingwen): We can use a repeated task to remove outdated meta.
|
||||
self.manager_ctx.remove_outdated_meta(META_TTL);
|
||||
|
||||
self.submit_root(procedure.id, 0, procedure.procedure)
|
||||
}
|
||||
|
||||
@@ -487,18 +517,31 @@ impl ProcedureManager for LocalManager {
|
||||
}
|
||||
|
||||
async fn procedure_state(&self, procedure_id: ProcedureId) -> Result<Option<ProcedureState>> {
|
||||
self.manager_ctx.remove_outdated_meta(META_TTL);
|
||||
|
||||
Ok(self.manager_ctx.state(procedure_id))
|
||||
}
|
||||
|
||||
fn procedure_watcher(&self, procedure_id: ProcedureId) -> Option<Watcher> {
|
||||
self.manager_ctx.remove_outdated_meta(META_TTL);
|
||||
|
||||
self.manager_ctx.watcher(procedure_id)
|
||||
}
|
||||
}
|
||||
|
||||
struct RemoveOutdatedMetaFunction {
|
||||
manager_ctx: Arc<ManagerContext>,
|
||||
ttl: Duration,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TaskFunction<Error> for RemoveOutdatedMetaFunction {
|
||||
fn name(&self) -> &str {
|
||||
"ProcedureManager-remove-outdated-meta-task"
|
||||
}
|
||||
|
||||
async fn call(&mut self) -> Result<()> {
|
||||
self.manager_ctx.remove_outdated_meta(self.ttl);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new [ProcedureMeta] for test purpose.
|
||||
#[cfg(test)]
|
||||
mod test_util {
|
||||
@@ -639,6 +682,7 @@ mod tests {
|
||||
let config = ManagerConfig {
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
..Default::default()
|
||||
};
|
||||
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
|
||||
let manager = LocalManager::new(config, state_store);
|
||||
@@ -660,6 +704,7 @@ mod tests {
|
||||
let config = ManagerConfig {
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
..Default::default()
|
||||
};
|
||||
let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
|
||||
let manager = LocalManager::new(config, state_store);
|
||||
@@ -706,6 +751,7 @@ mod tests {
|
||||
let config = ManagerConfig {
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
..Default::default()
|
||||
};
|
||||
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
|
||||
let manager = LocalManager::new(config, state_store);
|
||||
@@ -754,6 +800,7 @@ mod tests {
|
||||
let config = ManagerConfig {
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
..Default::default()
|
||||
};
|
||||
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
|
||||
let manager = LocalManager::new(config, state_store);
|
||||
@@ -807,4 +854,59 @@ mod tests {
|
||||
check_procedure(MockProcedure { panic: false }).await;
|
||||
check_procedure(MockProcedure { panic: true }).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_remove_outdated_meta_task() {
|
||||
let dir = create_temp_dir("remove_outdated_meta_task");
|
||||
let object_store = test_util::new_object_store(&dir);
|
||||
let config = ManagerConfig {
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
remove_outdated_meta_task_interval: Duration::from_millis(1),
|
||||
remove_outdated_meta_ttl: Duration::from_millis(1),
|
||||
};
|
||||
let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
|
||||
let manager = LocalManager::new(config, state_store);
|
||||
|
||||
let mut procedure = ProcedureToLoad::new("submit");
|
||||
procedure.lock_key = LockKey::single("test.submit");
|
||||
let procedure_id = ProcedureId::random();
|
||||
manager
|
||||
.submit(ProcedureWithId {
|
||||
id: procedure_id,
|
||||
procedure: Box::new(procedure),
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
let mut watcher = manager.procedure_watcher(procedure_id).unwrap();
|
||||
watcher.changed().await.unwrap();
|
||||
manager.start().unwrap();
|
||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||
assert!(manager
|
||||
.procedure_state(procedure_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none());
|
||||
|
||||
// After the manager is stopped, the remove-outdated-meta task no longer runs, so procedure metadata is not removed automatically.
|
||||
manager.stop().await.unwrap();
|
||||
let mut procedure = ProcedureToLoad::new("submit");
|
||||
procedure.lock_key = LockKey::single("test.submit");
|
||||
let procedure_id = ProcedureId::random();
|
||||
manager
|
||||
.submit(ProcedureWithId {
|
||||
id: procedure_id,
|
||||
procedure: Box::new(procedure),
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
let mut watcher = manager.procedure_watcher(procedure_id).unwrap();
|
||||
watcher.changed().await.unwrap();
|
||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||
assert!(manager
|
||||
.procedure_state(procedure_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -260,6 +260,10 @@ pub trait ProcedureManager: Send + Sync + 'static {
|
||||
/// Registers loader for specific procedure type `name`.
|
||||
fn register_loader(&self, name: &str, loader: BoxedProcedureLoader) -> Result<()>;
|
||||
|
||||
fn start(&self) -> Result<()>;
|
||||
|
||||
async fn stop(&self) -> Result<()>;
|
||||
|
||||
/// Submits a procedure to execute.
|
||||
///
|
||||
/// Returns a [Watcher] to watch the created procedure.
|
||||
|
||||
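Put together, the new `start`/`stop` hooks slot into the lifecycle the tests above exercise. A rough sketch of driving a manager end to end, assuming the crate's `BoxedProcedure` alias and with error handling kept minimal:

```rust
async fn drive(manager: &dyn ProcedureManager, my_procedure: BoxedProcedure) -> Result<()> {
    manager.start()?; // spawns the remove-outdated-meta repeated task

    let procedure_id = ProcedureId::random();
    let mut watcher = manager
        .submit(ProcedureWithId {
            id: procedure_id,
            procedure: my_procedure,
        })
        .await?;
    watcher.changed().await.unwrap(); // wait for the first state change, as the tests do

    let _state = manager.procedure_state(procedure_id).await?;

    manager.stop().await // stop background tasks on shutdown
}
```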
@@ -16,7 +16,7 @@
|
||||
use std::future::Future;
|
||||
use std::sync::{Mutex, Once};
|
||||
|
||||
-use common_telemetry::logging;
+use common_telemetry::info;
|
||||
use once_cell::sync::Lazy;
|
||||
use paste::paste;
|
||||
|
||||
@@ -26,13 +26,10 @@ const READ_WORKERS: usize = 8;
|
||||
const WRITE_WORKERS: usize = 8;
|
||||
const BG_WORKERS: usize = 8;
|
||||
|
||||
-pub fn create_runtime(thread_name: &str, worker_threads: usize) -> Runtime {
-    logging::info!(
-        "Creating runtime, thread name: {}, work_threads: {}.",
-        thread_name,
-        worker_threads
-    );
+pub fn create_runtime(runtime_name: &str, thread_name: &str, worker_threads: usize) -> Runtime {
+    info!("Creating runtime with runtime_name: {runtime_name}, thread_name: {thread_name}, work_threads: {worker_threads}.");
|
||||
Builder::default()
|
||||
.runtime_name(runtime_name)
|
||||
.thread_name(thread_name)
|
||||
.worker_threads(worker_threads)
|
||||
.build()
|
||||
@@ -79,9 +76,12 @@ impl GlobalRuntimes {
|
||||
|
||||
fn new(read: Option<Runtime>, write: Option<Runtime>, background: Option<Runtime>) -> Self {
|
||||
Self {
|
||||
-            read_runtime: read.unwrap_or_else(|| create_runtime("read-worker", READ_WORKERS)),
-            write_runtime: write.unwrap_or_else(|| create_runtime("write-worker", WRITE_WORKERS)),
-            bg_runtime: background.unwrap_or_else(|| create_runtime("bg-worker", BG_WORKERS)),
+            read_runtime: read
+                .unwrap_or_else(|| create_runtime("global-read", "read-worker", READ_WORKERS)),
+            write_runtime: write
+                .unwrap_or_else(|| create_runtime("global-write", "write-worker", WRITE_WORKERS)),
+            bg_runtime: background
+                .unwrap_or_else(|| create_runtime("global-bg", "bg-worker", BG_WORKERS)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
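The split between `runtime_name` and `thread_name` is worth noting: tokio only knows about the thread name, while the runtime name is a label the wrapper keeps for logging and metrics. A minimal sketch of what the wrapper ultimately delegates to, using tokio's own builder:

```rust
use tokio::runtime::{Builder, Runtime};

// Tokio names the worker threads; the `runtime_name` ("global-read", ...) stays
// a label held by the wrapper alongside the handle.
fn build_read_runtime() -> std::io::Result<Runtime> {
    Builder::new_multi_thread()
        .thread_name("read-worker")
        .worker_threads(8)
        .enable_all()
        .build()
}
```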
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::future::Future;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
@@ -26,9 +27,12 @@ pub use tokio::task::{JoinError, JoinHandle};
|
||||
use crate::error::*;
|
||||
use crate::metrics::*;
|
||||
|
||||
static RUNTIME_ID: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
/// A runtime to run future tasks
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Runtime {
|
||||
name: String,
|
||||
handle: Handle,
|
||||
// Used to receive a drop signal when dropper is dropped, inspired by databend
|
||||
_dropper: Arc<Dropper>,
|
||||
@@ -73,9 +77,14 @@ impl Runtime {
|
||||
pub fn block_on<F: Future>(&self, future: F) -> F::Output {
|
||||
self.handle.block_on(future)
|
||||
}
|
||||
|
||||
pub fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Builder {
|
||||
runtime_name: String,
|
||||
thread_name: String,
|
||||
builder: RuntimeBuilder,
|
||||
}
|
||||
@@ -83,6 +92,7 @@ pub struct Builder {
|
||||
impl Default for Builder {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
runtime_name: format!("runtime-{}", RUNTIME_ID.fetch_add(1, Ordering::Relaxed)),
|
||||
thread_name: "default-worker".to_string(),
|
||||
builder: RuntimeBuilder::new_multi_thread(),
|
||||
}
|
||||
@@ -116,6 +126,11 @@ impl Builder {
|
||||
self
|
||||
}
|
||||
|
||||
pub fn runtime_name(&mut self, val: impl Into<String>) -> &mut Self {
|
||||
self.runtime_name = val.into();
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets name of threads spawned by the Runtime thread pool
|
||||
pub fn thread_name(&mut self, val: impl Into<String>) -> &mut Self {
|
||||
self.thread_name = val.into();
|
||||
@@ -142,6 +157,7 @@ impl Builder {
|
||||
.spawn(move || runtime.block_on(recv_stop));
|
||||
|
||||
Ok(Runtime {
|
||||
name: self.runtime_name.clone(),
|
||||
handle,
|
||||
_dropper: Arc::new(Dropper {
|
||||
close: Some(send_stop),
|
||||
|
||||
@@ -5,14 +5,15 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[features]
|
||||
-console = ["console-subscriber"]
+tokio-console = ["console-subscriber", "tokio/tracing"]
|
||||
deadlock_detection = ["parking_lot"]
|
||||
|
||||
[dependencies]
|
||||
backtrace = "0.3"
|
||||
common-error = { path = "../error" }
|
||||
console-subscriber = { version = "0.1", optional = true }
|
||||
-metrics-exporter-prometheus = { git = "https://github.com/GreptimeTeam/metrics.git", rev = "174de287e9f7f9f57c0272be56c95df156489476", default-features = false }
+metrics-exporter-prometheus = { version = "0.11", default-features = false }
|
||||
metrics-util = "0.14.0"
|
||||
metrics.workspace = true
|
||||
once_cell = "1.10"
|
||||
opentelemetry = { version = "0.17", default-features = false, features = [
|
||||
@@ -24,6 +25,7 @@ parking_lot = { version = "0.12", features = [
|
||||
"deadlock_detection",
|
||||
], optional = true }
|
||||
serde = "1.0"
|
||||
tokio.workspace = true
|
||||
tracing = "0.1"
|
||||
tracing-appender = "0.2"
|
||||
tracing-bunyan-formatter = "0.3"
|
||||
|
||||
@@ -50,6 +50,12 @@ impl Default for LoggingOptions {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct TracingOptions {
|
||||
#[cfg(feature = "tokio-console")]
|
||||
pub tokio_console_addr: Option<String>,
|
||||
}
|
||||
|
||||
/// Init tracing for unittest.
|
||||
/// Write logs to file `unittest`.
|
||||
pub fn init_default_ut_logging() {
|
||||
@@ -71,7 +77,11 @@ pub fn init_default_ut_logging() {
|
||||
level,
|
||||
..Default::default()
|
||||
};
|
||||
*g = Some(init_global_logging("unittest", &opts));
|
||||
*g = Some(init_global_logging(
|
||||
"unittest",
|
||||
&opts,
|
||||
TracingOptions::default(),
|
||||
));
|
||||
|
||||
info!("logs dir = {}", dir);
|
||||
});
|
||||
@@ -80,7 +90,12 @@ pub fn init_default_ut_logging() {
|
||||
static GLOBAL_UT_LOG_GUARD: Lazy<Arc<Mutex<Option<Vec<WorkerGuard>>>>> =
|
||||
Lazy::new(|| Arc::new(Mutex::new(None)));
|
||||
|
||||
-pub fn init_global_logging(app_name: &str, opts: &LoggingOptions) -> Vec<WorkerGuard> {
+#[allow(clippy::print_stdout)]
+pub fn init_global_logging(
+    app_name: &str,
+    opts: &LoggingOptions,
+    tracing_opts: TracingOptions,
+) -> Vec<WorkerGuard> {
|
||||
let mut guards = vec![];
|
||||
let dir = &opts.dir;
|
||||
let level = &opts.level;
|
||||
@@ -127,6 +142,42 @@ pub fn init_global_logging(app_name: &str, opts: &LoggingOptions) -> Vec<WorkerG
|
||||
.expect("error parsing level string"),
|
||||
);
|
||||
|
||||
// Must enable 'tokio_unstable' cfg to use this feature.
|
||||
// For example: `RUSTFLAGS="--cfg tokio_unstable" cargo run -F common-telemetry/tokio-console -- standalone start`
|
||||
#[cfg(feature = "tokio-console")]
|
||||
let subscriber = {
|
||||
let tokio_console_layer = if let Some(tokio_console_addr) = &tracing_opts.tokio_console_addr
|
||||
{
|
||||
let addr: std::net::SocketAddr = tokio_console_addr.parse().unwrap_or_else(|e| {
|
||||
panic!("Invalid binding address '{tokio_console_addr}' for tokio-console: {e}");
|
||||
});
|
||||
println!("tokio-console listening on {addr}");
|
||||
|
||||
Some(
|
||||
console_subscriber::ConsoleLayer::builder()
|
||||
.server_addr(addr)
|
||||
.spawn(),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let stdout_logging_layer = stdout_logging_layer.with_filter(filter.clone());
|
||||
|
||||
let file_logging_layer = file_logging_layer.with_filter(filter);
|
||||
|
||||
Registry::default()
|
||||
.with(tokio_console_layer)
|
||||
.with(JsonStorageLayer)
|
||||
.with(stdout_logging_layer)
|
||||
.with(file_logging_layer)
|
||||
.with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR))
|
||||
};
|
||||
|
||||
// consume the `tracing_opts`, to avoid "unused" warnings
|
||||
let _ = tracing_opts;
|
||||
|
||||
#[cfg(not(feature = "tokio-console"))]
|
||||
let subscriber = Registry::default()
|
||||
.with(filter)
|
||||
.with(JsonStorageLayer)
|
||||
@@ -134,10 +185,6 @@ pub fn init_global_logging(app_name: &str, opts: &LoggingOptions) -> Vec<WorkerG
|
||||
.with(file_logging_layer)
|
||||
.with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR));
|
||||
|
||||
// Must enable 'tokio_unstable' cfg, https://github.com/tokio-rs/console
|
||||
#[cfg(feature = "console")]
|
||||
let subscriber = subscriber.with(console_subscriber::spawn());
|
||||
|
||||
if enable_jaeger_tracing {
|
||||
// Jaeger layer.
|
||||
global::set_text_map_propagator(TraceContextPropagator::new());
|
||||
|
||||
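With the new `TracingOptions`, the console endpoint is only wired up when an address is supplied. A hedged sketch of enabling it at startup; the address and app name are examples only, and the field exists only when the `tokio-console` feature is on and the binary is built with `RUSTFLAGS="--cfg tokio_unstable"`:

```rust
let opts = LoggingOptions::default();
let tracing_opts = TracingOptions {
    // tokio-console's usual default port; any reachable address works.
    tokio_console_addr: Some("127.0.0.1:6669".to_string()),
};
let _guards = init_global_logging("greptimedb", &opts, tracing_opts);
```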
@@ -20,6 +20,7 @@ use std::time::{Duration, Instant};
|
||||
use metrics::histogram;
|
||||
use metrics_exporter_prometheus::PrometheusBuilder;
|
||||
pub use metrics_exporter_prometheus::PrometheusHandle;
|
||||
use metrics_util::layers::{Layer, PrefixLayer};
|
||||
use once_cell::sync::Lazy;
|
||||
|
||||
static PROMETHEUS_HANDLE: Lazy<Arc<RwLock<Option<PrometheusHandle>>>> =
|
||||
@@ -32,9 +33,7 @@ pub fn init_default_metrics_recorder() {
|
||||
|
||||
/// Init prometheus recorder.
|
||||
fn init_prometheus_recorder() {
|
||||
-    let recorder = PrometheusBuilder::new()
-        .add_global_prefix("greptime".to_string())
-        .build_recorder();
+    let recorder = PrometheusBuilder::new().build_recorder();
|
||||
let mut h = PROMETHEUS_HANDLE.as_ref().write().unwrap();
|
||||
*h = Some(recorder.handle());
|
||||
// TODO(LFC): separate metrics for testing and metrics for production
|
||||
@@ -47,7 +46,9 @@ fn init_prometheus_recorder() {
|
||||
unsafe {
|
||||
metrics::clear_recorder();
|
||||
}
|
||||
-    match metrics::set_boxed_recorder(Box::new(recorder)) {
+    let layer = PrefixLayer::new("greptime");
+    let layered = layer.layer(recorder);
+    match metrics::set_boxed_recorder(Box::new(layered)) {
|
||||
Ok(_) => (),
|
||||
Err(err) => crate::warn!("Install prometheus recorder failed, cause: {}", err),
|
||||
};
|
||||
|
||||
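The effect is the same `greptime_` prefix as before, now applied by a `PrefixLayer` instead of the removed `add_global_prefix` builder method. A compact sketch of the new wiring, using only the calls shown in the hunk above:

```rust
use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle};
use metrics_util::layers::{Layer, PrefixLayer};

fn install_recorder() -> PrometheusHandle {
    let recorder = PrometheusBuilder::new().build_recorder();
    let handle = recorder.handle();
    // Every metric recorded through `metrics::*` is emitted as `greptime_<name>`.
    let layered = PrefixLayer::new("greptime").layer(recorder);
    if let Err(err) = metrics::set_boxed_recorder(Box::new(layered)) {
        eprintln!("install prometheus recorder failed: {err}");
    }
    handle
}
```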
@@ -48,7 +48,7 @@ impl Timestamp {
|
||||
/// The result time unit remains unchanged even if `duration` has a different unit with `self`.
|
||||
/// For example, a timestamp with value 1 and time unit second, subtracted by 1 millisecond
|
||||
/// and the result is still 1 second.
|
||||
-    pub fn sub(&self, duration: Duration) -> error::Result<Self> {
+    pub fn sub_duration(&self, duration: Duration) -> error::Result<Self> {
|
||||
let duration: i64 = match self.unit {
|
||||
TimeUnit::Second => {
|
||||
i64::try_from(duration.as_secs()).context(TimestampOverflowSnafu)?
|
||||
@@ -79,6 +79,13 @@ impl Timestamp {
|
||||
})
|
||||
}
|
||||
|
||||
/// Subtracts current timestamp with another timestamp, yielding a duration.
|
||||
pub fn sub(&self, rhs: Self) -> Option<chrono::Duration> {
|
||||
let lhs = self.to_chrono_datetime()?;
|
||||
let rhs = rhs.to_chrono_datetime()?;
|
||||
Some(lhs - rhs)
|
||||
}
|
||||
|
||||
pub fn new(value: i64, unit: TimeUnit) -> Self {
|
||||
Self { unit, value }
|
||||
}
|
||||
@@ -863,19 +870,19 @@ mod tests {
|
||||
#[test]
|
||||
fn test_timestamp_sub() {
|
||||
let res = Timestamp::new(1, TimeUnit::Second)
|
||||
-            .sub(Duration::from_secs(1))
+            .sub_duration(Duration::from_secs(1))
|
||||
.unwrap();
|
||||
assert_eq!(0, res.value);
|
||||
assert_eq!(TimeUnit::Second, res.unit);
|
||||
|
||||
let res = Timestamp::new(0, TimeUnit::Second)
|
||||
-            .sub(Duration::from_secs(1))
+            .sub_duration(Duration::from_secs(1))
|
||||
.unwrap();
|
||||
assert_eq!(-1, res.value);
|
||||
assert_eq!(TimeUnit::Second, res.unit);
|
||||
|
||||
let res = Timestamp::new(1, TimeUnit::Second)
|
||||
-            .sub(Duration::from_millis(1))
+            .sub_duration(Duration::from_millis(1))
|
||||
.unwrap();
|
||||
assert_eq!(1, res.value);
|
||||
assert_eq!(TimeUnit::Second, res.unit);
|
||||
@@ -914,4 +921,17 @@ mod tests {
|
||||
Timestamp::new(1, TimeUnit::Second).to_local_string()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_subtract_timestamp() {
|
||||
assert_eq!(
|
||||
Some(chrono::Duration::milliseconds(42)),
|
||||
Timestamp::new_millisecond(100).sub(Timestamp::new_millisecond(58))
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
Some(chrono::Duration::milliseconds(-42)),
|
||||
Timestamp::new_millisecond(58).sub(Timestamp::new_millisecond(100))
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
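The rename keeps both operations available but unambiguous: `sub_duration` shifts a timestamp and preserves its unit (with integer truncation), while the new `sub` compares two timestamps. A small worked example mirroring the tests above; import paths are assumed:

```rust
use std::time::Duration;
use common_time::timestamp::{TimeUnit, Timestamp};

#[test]
fn subtraction_flavors() {
    // Unit is preserved: 1 ms truncates to 0 s, so a 1-second timestamp is unchanged.
    let ts = Timestamp::new(1, TimeUnit::Second);
    assert_eq!(
        Timestamp::new(1, TimeUnit::Second),
        ts.sub_duration(Duration::from_millis(1)).unwrap()
    );

    // Timestamp - Timestamp yields a signed chrono::Duration.
    assert_eq!(
        Some(chrono::Duration::milliseconds(42)),
        Timestamp::new_millisecond(100).sub(Timestamp::new_millisecond(58))
    );
}
```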
@@ -22,6 +22,13 @@ pub fn current_time_rfc3339() -> String {
|
||||
chrono::Utc::now().to_rfc3339()
|
||||
}
|
||||
|
||||
/// Returns the yesterday time in rfc3339 format.
|
||||
pub fn yesterday_rfc3339() -> String {
|
||||
let now = chrono::Utc::now();
|
||||
let day_before = now - chrono::Duration::days(1);
|
||||
day_before.to_rfc3339()
|
||||
}
|
||||
|
||||
/// Port of rust unstable features `int_roundings`.
|
||||
pub(crate) fn div_ceil(this: i64, rhs: i64) -> i64 {
|
||||
let d = this / rhs;
|
||||
|
||||
@@ -20,6 +20,7 @@ common-datasource = { path = "../common/datasource" }
|
||||
common-function = { path = "../common/function" }
|
||||
common-grpc = { path = "../common/grpc" }
|
||||
common-grpc-expr = { path = "../common/grpc-expr" }
|
||||
common-meta = { path = "../common/meta" }
|
||||
common-procedure = { path = "../common/procedure" }
|
||||
common-query = { path = "../common/query" }
|
||||
common-recordbatch = { path = "../common/recordbatch" }
|
||||
@@ -33,6 +34,7 @@ datatypes = { path = "../datatypes" }
|
||||
file-table-engine = { path = "../file-table-engine" }
|
||||
futures = "0.3"
|
||||
futures-util.workspace = true
|
||||
key-lock = "0.1"
|
||||
hyper = { version = "0.14", features = ["full"] }
|
||||
humantime-serde = "1.1"
|
||||
log = "0.4"
|
||||
|
||||
@@ -23,7 +23,10 @@ use secrecy::SecretString;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use servers::http::HttpOptions;
|
||||
use servers::Mode;
|
||||
-use storage::config::EngineConfig as StorageEngineConfig;
+use storage::config::{
+    EngineConfig as StorageEngineConfig, DEFAULT_AUTO_FLUSH_INTERVAL, DEFAULT_MAX_FLUSH_TASKS,
+    DEFAULT_PICKER_SCHEDULE_INTERVAL, DEFAULT_REGION_WRITE_BUFFER_SIZE,
+};
|
||||
use storage::scheduler::SchedulerConfig;
|
||||
|
||||
use crate::error::Result;
|
||||
@@ -49,6 +52,7 @@ pub struct StorageConfig {
|
||||
pub store: ObjectStoreConfig,
|
||||
pub compaction: CompactionConfig,
|
||||
pub manifest: RegionManifestConfig,
|
||||
pub flush: FlushConfig,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Default, Deserialize)]
|
||||
@@ -166,6 +170,8 @@ pub struct RegionManifestConfig {
|
||||
pub gc_duration: Option<Duration>,
|
||||
/// Whether to try creating a manifest checkpoint on region opening
|
||||
pub checkpoint_on_startup: bool,
|
||||
/// Whether to compress manifest and checkpoint file by gzip
|
||||
pub compress: bool,
|
||||
}
|
||||
|
||||
impl Default for RegionManifestConfig {
|
||||
@@ -174,6 +180,7 @@ impl Default for RegionManifestConfig {
|
||||
checkpoint_margin: Some(10u16),
|
||||
gc_duration: Some(Duration::from_secs(30)),
|
||||
checkpoint_on_startup: false,
|
||||
compress: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -203,6 +210,34 @@ impl Default for CompactionConfig {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
|
||||
#[serde(default)]
|
||||
pub struct FlushConfig {
|
||||
/// Max inflight flush tasks.
|
||||
pub max_flush_tasks: usize,
|
||||
/// Default write buffer size for a region.
|
||||
pub region_write_buffer_size: ReadableSize,
|
||||
/// Interval to schedule auto flush picker to find region to flush.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub picker_schedule_interval: Duration,
|
||||
/// Interval to auto flush a region if it has not flushed yet.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub auto_flush_interval: Duration,
|
||||
}
|
||||
|
||||
impl Default for FlushConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_flush_tasks: DEFAULT_MAX_FLUSH_TASKS,
|
||||
region_write_buffer_size: DEFAULT_REGION_WRITE_BUFFER_SIZE,
|
||||
picker_schedule_interval: Duration::from_millis(
|
||||
DEFAULT_PICKER_SCHEDULE_INTERVAL.into(),
|
||||
),
|
||||
auto_flush_interval: Duration::from_millis(DEFAULT_AUTO_FLUSH_INTERVAL.into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&DatanodeOptions> for SchedulerConfig {
|
||||
fn from(value: &DatanodeOptions) -> Self {
|
||||
Self {
|
||||
@@ -214,12 +249,17 @@ impl From<&DatanodeOptions> for SchedulerConfig {
|
||||
impl From<&DatanodeOptions> for StorageEngineConfig {
|
||||
fn from(value: &DatanodeOptions) -> Self {
|
||||
Self {
|
||||
compress_manifest: value.storage.manifest.compress,
|
||||
manifest_checkpoint_on_startup: value.storage.manifest.checkpoint_on_startup,
|
||||
manifest_checkpoint_margin: value.storage.manifest.checkpoint_margin,
|
||||
manifest_gc_duration: value.storage.manifest.gc_duration,
|
||||
max_files_in_l0: value.storage.compaction.max_files_in_level0,
|
||||
max_purge_tasks: value.storage.compaction.max_purge_tasks,
|
||||
sst_write_buffer_size: value.storage.compaction.sst_write_buffer_size,
|
||||
max_flush_tasks: value.storage.flush.max_flush_tasks,
|
||||
region_write_buffer_size: value.storage.flush.region_write_buffer_size,
|
||||
picker_schedule_interval: value.storage.flush.picker_schedule_interval,
|
||||
auto_flush_interval: value.storage.flush.auto_flush_interval,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -283,6 +323,12 @@ impl Default for DatanodeOptions {
|
||||
}
|
||||
}
|
||||
|
||||
impl DatanodeOptions {
|
||||
pub fn env_list_keys() -> Option<&'static [&'static str]> {
|
||||
Some(&["meta_client_options.metasrv_addrs"])
|
||||
}
|
||||
}
|
||||
|
||||
/// Datanode service.
|
||||
pub struct Datanode {
|
||||
opts: DatanodeOptions,
|
||||
|
||||
@@ -19,12 +19,44 @@ use common_procedure::ProcedureId;
|
||||
use serde_json::error::Error as JsonError;
|
||||
use snafu::Location;
|
||||
use storage::error::Error as StorageError;
|
||||
use store_api::storage::RegionNumber;
|
||||
use table::error::Error as TableError;
|
||||
|
||||
/// Business error of datanode.
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to check region in table: {}, source: {}", table_name, source))]
|
||||
CheckRegion {
|
||||
table_name: String,
|
||||
#[snafu(backtrace)]
|
||||
source: TableError,
|
||||
region_number: RegionNumber,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to access catalog, source: {}", source))]
|
||||
AccessCatalog {
|
||||
#[snafu(backtrace)]
|
||||
source: catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to open table: {}, source: {}", table_name, source))]
|
||||
OpenTable {
|
||||
table_name: String,
|
||||
#[snafu(backtrace)]
|
||||
source: TableError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to register table: {}, source: {}", table_name, source))]
|
||||
RegisterTable {
|
||||
table_name: String,
|
||||
#[snafu(backtrace)]
|
||||
source: catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to send message: {err_msg}"))]
|
||||
SendMessage { err_msg: String, location: Location },
|
||||
|
||||
#[snafu(display("Failed to execute sql, source: {}", source))]
|
||||
ExecuteSql {
|
||||
#[snafu(backtrace)]
|
||||
@@ -441,6 +473,25 @@ pub enum Error {
|
||||
location: Location,
|
||||
source: JsonError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to decode object into json, source: {}", source))]
|
||||
DecodeJson {
|
||||
location: Location,
|
||||
source: JsonError,
|
||||
},
|
||||
|
||||
#[snafu(display("Payload not exist"))]
|
||||
PayloadNotExist { location: Location },
|
||||
|
||||
#[snafu(display("Failed to start the procedure manager"))]
|
||||
StartProcedureManager {
|
||||
source: common_procedure::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to stop the procedure manager"))]
|
||||
StopProcedureManager {
|
||||
source: common_procedure::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -455,12 +506,16 @@ impl ErrorExt for Error {
|
||||
| ExecuteLogicalPlan { source }
|
||||
| DescribeStatement { source } => source.status_code(),
|
||||
|
||||
OpenTable { source, .. } => source.status_code(),
|
||||
RegisterTable { source, .. } | AccessCatalog { source, .. } => source.status_code(),
|
||||
|
||||
DecodeLogicalPlan { source } => source.status_code(),
|
||||
NewCatalog { source } | RegisterSchema { source } => source.status_code(),
|
||||
FindTable { source, .. } => source.status_code(),
|
||||
-            CreateTable { source, .. } | GetTable { source, .. } | AlterTable { source, .. } => {
-                source.status_code()
-            }
+            CreateTable { source, .. }
+            | GetTable { source, .. }
+            | AlterTable { source, .. }
+            | CheckRegion { source, .. } => source.status_code(),
|
||||
DropTable { source, .. } => source.status_code(),
|
||||
FlushTable { source, .. } => source.status_code(),
|
||||
|
||||
@@ -501,7 +556,9 @@ impl ErrorExt for Error {
|
||||
| ColumnNoneDefaultValue { .. }
|
||||
| PrepareImmutableTable { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
-            EncodeJson { .. } => StatusCode::Unexpected,
+            EncodeJson { .. } | DecodeJson { .. } | PayloadNotExist { .. } => {
+                StatusCode::Unexpected
+            }
|
||||
|
||||
// TODO(yingwen): Further categorize http error.
|
||||
StartServer { .. }
|
||||
@@ -517,7 +574,8 @@ impl ErrorExt for Error {
|
||||
| IncorrectInternalState { .. }
|
||||
| ShutdownServer { .. }
|
||||
| ShutdownInstance { .. }
|
||||
-            | CloseTableEngine { .. } => StatusCode::Internal,
+            | CloseTableEngine { .. }
+            | SendMessage { .. } => StatusCode::Internal,
|
||||
|
||||
InitBackend { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
@@ -533,6 +591,9 @@ impl ErrorExt for Error {
|
||||
source.status_code()
|
||||
}
|
||||
WaitProcedure { source, .. } => source.status_code(),
|
||||
StartProcedureManager { source } | StopProcedureManager { source } => {
|
||||
source.status_code()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -16,14 +16,26 @@ use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
-use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, NodeStat, Peer};
+use api::v1::meta::{HeartbeatRequest, NodeStat, Peer};
|
||||
use catalog::{datanode_stat, CatalogManagerRef};
|
||||
use common_telemetry::{error, info, trace, warn};
|
||||
use mailbox::{HeartbeatMailbox, MailboxRef};
|
||||
use meta_client::client::{HeartbeatSender, MetaClient};
|
||||
use snafu::ResultExt;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::time::Instant;
|
||||
|
||||
use self::handler::{HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutorRef};
|
||||
use self::utils::outgoing_message_to_mailbox_message;
|
||||
use crate::error::{MetaClientInitSnafu, Result};
|
||||
|
||||
pub mod handler;
|
||||
pub mod utils;
|
||||
|
||||
// TODO(weny): remove allow dead_code
|
||||
#[allow(dead_code)]
|
||||
pub mod mailbox;
|
||||
|
||||
pub struct HeartbeatTask {
|
||||
node_id: u64,
|
||||
server_addr: String,
|
||||
@@ -32,6 +44,7 @@ pub struct HeartbeatTask {
|
||||
meta_client: Arc<MetaClient>,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
interval: u64,
|
||||
heartbeat_response_handler_exector: HeartbeatResponseHandlerExecutorRef,
|
||||
}
|
||||
|
||||
impl Drop for HeartbeatTask {
|
||||
@@ -48,6 +61,7 @@ impl HeartbeatTask {
|
||||
server_hostname: Option<String>,
|
||||
meta_client: Arc<MetaClient>,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
heartbeat_response_handler_exector: HeartbeatResponseHandlerExecutorRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
node_id,
|
||||
@@ -57,12 +71,15 @@ impl HeartbeatTask {
|
||||
meta_client,
|
||||
catalog_manager,
|
||||
interval: 5_000, // default interval is set to 5 secs
|
||||
heartbeat_response_handler_exector,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn create_streams(
|
||||
meta_client: &MetaClient,
|
||||
running: Arc<AtomicBool>,
|
||||
handler_executor: HeartbeatResponseHandlerExecutorRef,
|
||||
mailbox: MailboxRef,
|
||||
) -> Result<HeartbeatSender> {
|
||||
let (tx, mut rx) = meta_client.heartbeat().await.context(MetaClientInitSnafu)?;
|
||||
common_runtime::spawn_bg(async move {
|
||||
@@ -73,7 +90,10 @@ impl HeartbeatTask {
|
||||
None
|
||||
}
|
||||
} {
|
||||
Self::handle_response(res).await;
|
||||
let ctx = HeartbeatResponseHandlerContext::new(mailbox.clone(), res);
|
||||
if let Err(e) = Self::handle_response(ctx, handler_executor.clone()) {
|
||||
error!(e;"Error while handling heartbeat response");
|
||||
}
|
||||
if !running.load(Ordering::Acquire) {
|
||||
info!("Heartbeat task shutdown");
|
||||
}
|
||||
@@ -83,8 +103,12 @@ impl HeartbeatTask {
|
||||
Ok(tx)
|
||||
}
|
||||
|
||||
-    async fn handle_response(resp: HeartbeatResponse) {
-        trace!("heartbeat response: {:?}", resp);
+    fn handle_response(
+        ctx: HeartbeatResponseHandlerContext,
+        handler_executor: HeartbeatResponseHandlerExecutorRef,
+    ) -> Result<()> {
+        trace!("heartbeat response: {:?}", ctx.response);
|
||||
handler_executor.handle(ctx)
|
||||
}
|
||||
|
||||
/// Start heartbeat task, spawn background task.
|
||||
@@ -101,39 +125,93 @@ impl HeartbeatTask {
|
||||
let node_id = self.node_id;
|
||||
let addr = resolve_addr(&self.server_addr, &self.server_hostname);
|
||||
let meta_client = self.meta_client.clone();
|
||||
|
||||
let catalog_manager_clone = self.catalog_manager.clone();
|
||||
let mut tx = Self::create_streams(&meta_client, running.clone()).await?;
|
||||
|
||||
let handler_executor = self.heartbeat_response_handler_exector.clone();
|
||||
|
||||
let (outgoing_tx, mut outgoing_rx) = mpsc::channel(16);
|
||||
let mailbox = Arc::new(HeartbeatMailbox::new(outgoing_tx));
|
||||
|
||||
let mut tx = Self::create_streams(
|
||||
&meta_client,
|
||||
running.clone(),
|
||||
handler_executor.clone(),
|
||||
mailbox.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
common_runtime::spawn_bg(async move {
|
||||
while running.load(Ordering::Acquire) {
|
||||
let (region_num, region_stats) = datanode_stat(&catalog_manager_clone).await;
|
||||
let sleep = tokio::time::sleep(Duration::from_millis(0));
|
||||
tokio::pin!(sleep);
|
||||
|
||||
let req = HeartbeatRequest {
|
||||
peer: Some(Peer {
|
||||
id: node_id,
|
||||
addr: addr.clone(),
|
||||
}),
|
||||
node_stat: Some(NodeStat {
|
||||
region_num: region_num as _,
|
||||
..Default::default()
|
||||
}),
|
||||
region_stats,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
if let Err(e) = tx.send(req).await {
|
||||
error!("Failed to send heartbeat to metasrv, error: {:?}", e);
|
||||
match Self::create_streams(&meta_client, running.clone()).await {
|
||||
Ok(new_tx) => {
|
||||
info!("Reconnected to metasrv");
|
||||
tx = new_tx;
|
||||
loop {
|
||||
if !running.load(Ordering::Acquire) {
|
||||
info!("shutdown heartbeat task");
|
||||
break;
|
||||
}
|
||||
let req = tokio::select! {
|
||||
message = outgoing_rx.recv() => {
|
||||
if let Some(message) = message {
|
||||
match outgoing_message_to_mailbox_message(message) {
|
||||
Ok(message) => {
|
||||
let req = HeartbeatRequest {
|
||||
peer: Some(Peer {
|
||||
id: node_id,
|
||||
addr: addr.clone(),
|
||||
}),
|
||||
mailbox_message: Some(message),
|
||||
..Default::default()
|
||||
};
|
||||
Some(req)
|
||||
}
|
||||
Err(e) => {
|
||||
error!(e;"Failed to encode mailbox messages!");
|
||||
None
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
Err(e) => {
|
||||
error!(e;"Failed to reconnect to metasrv!");
|
||||
}
|
||||
_ = &mut sleep => {
|
||||
let (region_num, region_stats) = datanode_stat(&catalog_manager_clone).await;
|
||||
let req = HeartbeatRequest {
|
||||
peer: Some(Peer {
|
||||
id: node_id,
|
||||
addr: addr.clone(),
|
||||
}),
|
||||
node_stat: Some(NodeStat {
|
||||
region_num: region_num as _,
|
||||
..Default::default()
|
||||
}),
|
||||
region_stats,
|
||||
..Default::default()
|
||||
};
|
||||
sleep.as_mut().reset(Instant::now() + Duration::from_millis(interval));
|
||||
Some(req)
|
||||
}
|
||||
};
|
||||
if let Some(req) = req {
|
||||
if let Err(e) = tx.send(req).await {
|
||||
error!("Failed to send heartbeat to metasrv, error: {:?}", e);
|
||||
match Self::create_streams(
|
||||
&meta_client,
|
||||
running.clone(),
|
||||
handler_executor.clone(),
|
||||
mailbox.clone(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(new_tx) => {
|
||||
info!("Reconnected to metasrv");
|
||||
tx = new_tx;
|
||||
}
|
||||
Err(e) => {
|
||||
error!(e;"Failed to reconnect to metasrv!");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
tokio::time::sleep(Duration::from_millis(interval)).await;
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
src/datanode/src/heartbeat/handler.rs (new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::HeartbeatResponse;
|
||||
use common_telemetry::error;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::heartbeat::mailbox::{IncomingMessage, MailboxRef};
|
||||
|
||||
pub mod open_region;
|
||||
pub mod parse_mailbox_message;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
pub type HeartbeatResponseHandlerExecutorRef = Arc<dyn HeartbeatResponseHandlerExecutor>;
|
||||
pub type HeartbeatResponseHandlerRef = Arc<dyn HeartbeatResponseHandler>;
|
||||
|
||||
pub struct HeartbeatResponseHandlerContext {
|
||||
pub mailbox: MailboxRef,
|
||||
pub response: HeartbeatResponse,
|
||||
pub incoming_message: Option<IncomingMessage>,
|
||||
is_skip_all: bool,
|
||||
}
|
||||
|
||||
impl HeartbeatResponseHandlerContext {
|
||||
pub fn new(mailbox: MailboxRef, response: HeartbeatResponse) -> Self {
|
||||
Self {
|
||||
mailbox,
|
||||
response,
|
||||
incoming_message: None,
|
||||
is_skip_all: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_skip_all(&self) -> bool {
|
||||
self.is_skip_all
|
||||
}
|
||||
|
||||
pub fn finish(&mut self) {
|
||||
self.is_skip_all = true
|
||||
}
|
||||
}
|
||||
|
||||
pub trait HeartbeatResponseHandler: Send + Sync {
|
||||
fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool;
|
||||
|
||||
fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> Result<()>;
|
||||
}
|
||||
|
||||
pub trait HeartbeatResponseHandlerExecutor: Send + Sync {
|
||||
fn handle(&self, ctx: HeartbeatResponseHandlerContext) -> Result<()>;
|
||||
}
|
||||
|
||||
pub struct HandlerGroupExecutor {
|
||||
handlers: Vec<HeartbeatResponseHandlerRef>,
|
||||
}
|
||||
|
||||
impl HandlerGroupExecutor {
|
||||
pub fn new(handlers: Vec<HeartbeatResponseHandlerRef>) -> Self {
|
||||
Self { handlers }
|
||||
}
|
||||
}
|
||||
|
||||
impl HeartbeatResponseHandlerExecutor for HandlerGroupExecutor {
|
||||
fn handle(&self, mut ctx: HeartbeatResponseHandlerContext) -> Result<()> {
|
||||
for handler in &self.handlers {
|
||||
if ctx.is_skip_all() {
|
||||
break;
|
||||
}
|
||||
|
||||
if !handler.is_acceptable(&ctx) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Err(e) = handler.handle(&mut ctx) {
|
||||
error!(e;"Error while handling: {:?}", ctx.response);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
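For context, the executor is meant to be composed once and then fed every heartbeat response; the datanode instance later in this diff builds it with `ParseMailboxMessageHandler` (and, eventually, `OpenRegionHandler`). A sketch of that flow using only the types introduced in this file:

```rust
use std::sync::Arc;

// `mailbox` and `response` come from the heartbeat task; error handling is the caller's.
fn handle_heartbeat(mailbox: MailboxRef, response: HeartbeatResponse) -> Result<()> {
    let executor = HandlerGroupExecutor::new(vec![Arc::new(ParseMailboxMessageHandler::default())]);
    let ctx = HeartbeatResponseHandlerContext::new(mailbox, response);
    executor.handle(ctx)
}
```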
src/datanode/src/heartbeat/handler/open_region.rs (new file, 201 lines)
@@ -0,0 +1,201 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use catalog::{CatalogManagerRef, RegisterTableRequest};
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_meta::instruction::{Instruction, InstructionReply, RegionIdent, SimpleReply};
|
||||
use common_telemetry::{error, warn};
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::RegionNumber;
|
||||
use table::engine::manager::TableEngineManagerRef;
|
||||
use table::engine::EngineContext;
|
||||
use table::requests::OpenTableRequest;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::heartbeat::handler::HeartbeatResponseHandler;
|
||||
use crate::heartbeat::HeartbeatResponseHandlerContext;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct OpenRegionHandler {
|
||||
catalog_manager: CatalogManagerRef,
|
||||
table_engine_manager: TableEngineManagerRef,
|
||||
}
|
||||
|
||||
impl HeartbeatResponseHandler for OpenRegionHandler {
|
||||
fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
|
||||
matches!(
|
||||
ctx.incoming_message,
|
||||
Some((_, Instruction::OpenRegion { .. }))
|
||||
)
|
||||
}
|
||||
|
||||
fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> Result<()> {
|
||||
let Some((meta, Instruction::OpenRegion(region_ident))) = ctx.incoming_message.take() else {
|
||||
unreachable!("OpenRegionHandler: should be guarded by 'is_acceptable'");
|
||||
};
|
||||
|
||||
ctx.finish();
|
||||
let mailbox = ctx.mailbox.clone();
|
||||
let self_ref = Arc::new(self.clone());
|
||||
|
||||
common_runtime::spawn_bg(async move {
|
||||
let (engine, request) = OpenRegionHandler::prepare_request(region_ident);
|
||||
let result = self_ref.open_region_inner(engine, request).await;
|
||||
if let Err(e) = mailbox
|
||||
.send((meta, OpenRegionHandler::map_result(result)))
|
||||
.await
|
||||
{
|
||||
error!(e; "Failed to send reply to mailbox");
|
||||
}
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl OpenRegionHandler {
|
||||
fn map_result(result: Result<bool>) -> InstructionReply {
|
||||
result.map_or_else(
|
||||
|error| {
|
||||
InstructionReply::OpenRegion(SimpleReply {
|
||||
result: false,
|
||||
error: Some(error.to_string()),
|
||||
})
|
||||
},
|
||||
|result| {
|
||||
InstructionReply::OpenRegion(SimpleReply {
|
||||
result,
|
||||
error: None,
|
||||
})
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn prepare_request(ident: RegionIdent) -> (String, OpenTableRequest) {
|
||||
let RegionIdent {
|
||||
catalog,
|
||||
schema,
|
||||
table,
|
||||
table_id,
|
||||
region_number,
|
||||
engine,
|
||||
} = ident;
|
||||
|
||||
(
|
||||
engine,
|
||||
OpenTableRequest {
|
||||
catalog_name: catalog,
|
||||
schema_name: schema,
|
||||
table_name: table,
|
||||
table_id,
|
||||
region_numbers: vec![region_number],
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns true if table has been opened.
|
||||
async fn check_table(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
region_numbers: &[RegionNumber],
|
||||
) -> Result<bool> {
|
||||
if let Some(table) = self
|
||||
.catalog_manager
|
||||
.table(catalog_name, schema_name, table_name)
|
||||
.await
|
||||
.context(error::AccessCatalogSnafu)?
|
||||
{
|
||||
for r in region_numbers {
|
||||
let region_exist =
|
||||
table
|
||||
.contain_regions(*r)
|
||||
.with_context(|_| error::CheckRegionSnafu {
|
||||
table_name: format_full_table_name(
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
),
|
||||
region_number: *r,
|
||||
})?;
|
||||
if !region_exist {
|
||||
warn!(
|
||||
"Failed to check table: {}, region: {} does not exist",
|
||||
format_full_table_name(catalog_name, schema_name, table_name,),
|
||||
r
|
||||
);
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
async fn open_region_inner(&self, engine: String, request: OpenTableRequest) -> Result<bool> {
|
||||
let OpenTableRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
region_numbers,
|
||||
..
|
||||
} = &request;
|
||||
let engine =
|
||||
self.table_engine_manager
|
||||
.engine(&engine)
|
||||
.context(error::TableEngineNotFoundSnafu {
|
||||
engine_name: &engine,
|
||||
})?;
|
||||
let ctx = EngineContext::default();
|
||||
|
||||
if self
|
||||
.check_table(catalog_name, schema_name, table_name, region_numbers)
|
||||
.await?
|
||||
{
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
if let Some(table) = engine
|
||||
.open_table(&ctx, request.clone())
|
||||
.await
|
||||
.with_context(|_| error::OpenTableSnafu {
|
||||
table_name: format_full_table_name(catalog_name, schema_name, table_name),
|
||||
})?
|
||||
{
|
||||
let request = RegisterTableRequest {
|
||||
catalog: request.catalog_name.clone(),
|
||||
schema: request.schema_name.clone(),
|
||||
table_name: request.table_name.clone(),
|
||||
table_id: request.table_id,
|
||||
table,
|
||||
};
|
||||
self.catalog_manager
|
||||
.register_table(request)
|
||||
.await
|
||||
.with_context(|_| error::RegisterTableSnafu {
|
||||
table_name: format_full_table_name(catalog_name, schema_name, table_name),
|
||||
})?;
|
||||
Ok(true)
|
||||
} else {
|
||||
// Case 1:
|
||||
// TODO(weny): Fix/Cleanup the broken table manifest
|
||||
// The manifest writing operation should be atomic.
|
||||
// Therefore, we won't meet this case, in theory.
|
||||
|
||||
// Case 2: The target region was not found in table meta
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
src/datanode/src/heartbeat/handler/parse_mailbox_message.rs (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::heartbeat::handler::{HeartbeatResponseHandler, HeartbeatResponseHandlerContext};
|
||||
use crate::heartbeat::utils::mailbox_message_to_incoming_message;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ParseMailboxMessageHandler;
|
||||
|
||||
impl HeartbeatResponseHandler for ParseMailboxMessageHandler {
|
||||
fn is_acceptable(&self, _ctx: &HeartbeatResponseHandlerContext) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> Result<()> {
|
||||
if let Some(message) = &ctx.response.mailbox_message {
|
||||
if message.payload.is_some() {
|
||||
// mailbox_message_to_incoming_message will raise an error if payload is none
|
||||
ctx.incoming_message = Some(mailbox_message_to_incoming_message(message.clone())?)
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
src/datanode/src/heartbeat/handler/tests.rs (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_meta::instruction::{InstructionReply, SimpleReply};
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::heartbeat::mailbox::{HeartbeatMailbox, MessageMeta};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_heartbeat_mailbox() {
|
||||
let (tx, mut rx) = mpsc::channel(8);
|
||||
|
||||
let mailbox = HeartbeatMailbox::new(tx);
|
||||
|
||||
let meta = MessageMeta::new_test(1, "test", "foo", "bar");
|
||||
let reply = InstructionReply::OpenRegion(SimpleReply {
|
||||
result: true,
|
||||
error: None,
|
||||
});
|
||||
mailbox.send((meta.clone(), reply.clone())).await.unwrap();
|
||||
|
||||
let message = rx.recv().await.unwrap();
|
||||
assert_eq!(message.0, meta);
|
||||
assert_eq!(message.1, reply);
|
||||
}
|
||||
src/datanode/src/heartbeat/mailbox.rs (new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_meta::instruction::{Instruction, InstructionReply};
|
||||
use tokio::sync::mpsc::Sender;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
|
||||
pub type IncomingMessage = (MessageMeta, Instruction);
|
||||
pub type OutgoingMessage = (MessageMeta, InstructionReply);
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
pub struct MessageMeta {
|
||||
pub id: u64,
|
||||
pub subject: String,
|
||||
pub to: String,
|
||||
pub from: String,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl MessageMeta {
|
||||
pub fn new_test(id: u64, subject: &str, to: &str, from: &str) -> Self {
|
||||
MessageMeta {
|
||||
id,
|
||||
subject: subject.to_string(),
|
||||
to: to.to_string(),
|
||||
from: from.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct HeartbeatMailbox {
|
||||
sender: Sender<OutgoingMessage>,
|
||||
}
|
||||
|
||||
impl HeartbeatMailbox {
|
||||
pub fn new(sender: Sender<OutgoingMessage>) -> Self {
|
||||
Self { sender }
|
||||
}
|
||||
|
||||
pub async fn send(&self, message: OutgoingMessage) -> Result<()> {
|
||||
self.sender.send(message).await.map_err(|e| {
|
||||
error::SendMessageSnafu {
|
||||
err_msg: e.to_string(),
|
||||
}
|
||||
.build()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub type MailboxRef = Arc<HeartbeatMailbox>;
|
||||
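A short sketch of how the heartbeat task wires this mailbox to its outgoing channel, mirroring `HeartbeatTask::start` above (the channel capacity of 16 is the same value used there):

```rust
use std::sync::Arc;
use tokio::sync::mpsc;

// The heartbeat loop holds the receiving end; handlers hold the mailbox and push
// `(MessageMeta, InstructionReply)` pairs into it.
fn wire_mailbox() -> (MailboxRef, mpsc::Receiver<OutgoingMessage>) {
    let (outgoing_tx, outgoing_rx) = mpsc::channel(16);
    let mailbox: MailboxRef = Arc::new(HeartbeatMailbox::new(outgoing_tx));
    (mailbox, outgoing_rx)
}
```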
src/datanode/src/heartbeat/utils.rs (new file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::meta::mailbox_message::Payload;
|
||||
use api::v1::meta::MailboxMessage;
|
||||
use common_meta::instruction::Instruction;
|
||||
use common_time::util::current_time_millis;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::heartbeat::mailbox::{IncomingMessage, MessageMeta, OutgoingMessage};
|
||||
|
||||
pub fn mailbox_message_to_incoming_message(m: MailboxMessage) -> Result<IncomingMessage> {
|
||||
m.payload
|
||||
.map(|payload| match payload {
|
||||
Payload::Json(json) => {
|
||||
let instruction: Instruction = serde_json::from_str(&json)?;
|
||||
Ok((
|
||||
MessageMeta {
|
||||
id: m.id,
|
||||
subject: m.subject,
|
||||
to: m.to,
|
||||
from: m.from,
|
||||
},
|
||||
instruction,
|
||||
))
|
||||
}
|
||||
})
|
||||
.transpose()
|
||||
.context(error::DecodeJsonSnafu)?
|
||||
.context(error::PayloadNotExistSnafu)
|
||||
}
|
||||
|
||||
pub fn outgoing_message_to_mailbox_message(
|
||||
(meta, reply): OutgoingMessage,
|
||||
) -> Result<MailboxMessage> {
|
||||
Ok(MailboxMessage {
|
||||
id: meta.id,
|
||||
subject: meta.subject,
|
||||
from: meta.to,
|
||||
to: meta.from,
|
||||
timestamp_millis: current_time_millis(),
|
||||
payload: Some(Payload::Json(
|
||||
serde_json::to_string(&reply).context(error::EncodeJsonSnafu)?,
|
||||
)),
|
||||
})
|
||||
}
|
||||
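A hedged example of the reply path these helpers serve: a handler's reply becomes the JSON payload of the mailbox message returned with the next heartbeat (note the deliberate `from`/`to` swap in the conversion above):

```rust
use common_meta::instruction::{InstructionReply, SimpleReply};

fn reply_for(meta: MessageMeta) -> Result<MailboxMessage> {
    let reply = InstructionReply::OpenRegion(SimpleReply {
        result: true,
        error: None,
    });
    outgoing_message_to_mailbox_message((meta, reply))
}
```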
@@ -17,6 +17,7 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use std::{fs, path};
|
||||
|
||||
use api::v1::meta::Role;
|
||||
use catalog::remote::MetaKvBackend;
|
||||
use catalog::{CatalogManager, CatalogManagerRef, RegisterTableRequest};
|
||||
use common_base::readable_size::ReadableSize;
|
||||
@@ -61,7 +62,10 @@ use crate::datanode::{
|
||||
use crate::error::{
|
||||
self, CatalogSnafu, MetaClientInitSnafu, MissingMetasrvOptsSnafu, MissingNodeIdSnafu,
|
||||
NewCatalogSnafu, OpenLogStoreSnafu, RecoverProcedureSnafu, Result, ShutdownInstanceSnafu,
|
||||
StartProcedureManagerSnafu, StopProcedureManagerSnafu,
|
||||
};
|
||||
use crate::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
||||
use crate::heartbeat::handler::HandlerGroupExecutor;
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::sql::{SqlHandler, SqlRequest};
|
||||
|
||||
@@ -112,13 +116,16 @@ impl Instance {
|
||||
let log_store = Arc::new(create_log_store(&opts.wal).await?);
|
||||
|
||||
let mito_engine = Arc::new(DefaultEngine::new(
|
||||
-            TableEngineConfig::default(),
+            TableEngineConfig {
+                compress_manifest: opts.storage.manifest.compress,
+            },
|
||||
EngineImpl::new(
|
||||
StorageEngineConfig::from(opts),
|
||||
log_store.clone(),
|
||||
object_store.clone(),
|
||||
compaction_scheduler,
|
||||
),
|
||||
)
|
||||
.unwrap(),
|
||||
object_store.clone(),
|
||||
));
|
||||
|
||||
@@ -196,6 +203,9 @@ impl Instance {
|
||||
let factory = QueryEngineFactory::new(catalog_manager.clone());
|
||||
let query_engine = factory.query_engine();
|
||||
|
||||
let handlder_executor =
|
||||
HandlerGroupExecutor::new(vec![Arc::new(ParseMailboxMessageHandler::default())]);
|
||||
|
||||
let heartbeat_task = match opts.mode {
|
||||
Mode::Standalone => None,
|
||||
Mode::Distributed => Some(HeartbeatTask::new(
|
||||
@@ -204,6 +214,7 @@ impl Instance {
|
||||
opts.rpc_hostname.clone(),
|
||||
meta_client.as_ref().unwrap().clone(),
|
||||
catalog_manager.clone(),
|
||||
Arc::new(handlder_executor),
|
||||
)),
|
||||
};
|
||||
|
||||
@@ -250,10 +261,17 @@ impl Instance {
|
||||
.recover()
|
||||
.await
|
||||
.context(RecoverProcedureSnafu)?;
|
||||
self.procedure_manager
|
||||
.start()
|
||||
.context(StartProcedureManagerSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn shutdown(&self) -> Result<()> {
|
||||
self.procedure_manager
|
||||
.stop()
|
||||
.await
|
||||
.context(StopProcedureManagerSnafu)?;
|
||||
if let Some(heartbeat_task) = &self.heartbeat_task {
|
||||
heartbeat_task
|
||||
.close()
|
||||
@@ -502,8 +520,11 @@ async fn new_metasrv_client(node_id: u64, meta_config: &MetaClientOptions) -> Re
|
||||
.timeout(Duration::from_millis(meta_config.timeout_millis))
|
||||
.connect_timeout(Duration::from_millis(meta_config.connect_timeout_millis))
|
||||
.tcp_nodelay(meta_config.tcp_nodelay);
|
||||
-    let channel_manager = ChannelManager::with_config(config);
-    let mut meta_client = MetaClientBuilder::new(cluster_id, member_id)
+    let mut channel_manager = ChannelManager::with_config(config);
+    channel_manager.start_channel_recycle();
+
+    let mut meta_client = MetaClientBuilder::new(cluster_id, member_id, Role::Datanode)
|
||||
.enable_heartbeat()
|
||||
.enable_router()
|
||||
.enable_store()
|
||||
@@ -557,6 +578,7 @@ pub(crate) async fn create_procedure_manager(
|
||||
let manager_config = ManagerConfig {
|
||||
max_retry_times: procedure_config.max_retry_times,
|
||||
retry_delay: procedure_config.retry_delay,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
Ok(Arc::new(LocalManager::new(manager_config, state_store)))
|
||||
|
||||
@@ -171,9 +171,7 @@ impl Instance {
|
||||
) -> Result<Output> {
|
||||
let query = PromQuery {
|
||||
query: promql.to_string(),
|
||||
start: "0".to_string(),
|
||||
end: "0".to_string(),
|
||||
step: "5m".to_string(),
|
||||
..PromQuery::default()
|
||||
};
|
||||
let mut stmt = QueryLanguageParser::parse_promql(&query).context(ExecuteSqlSnafu)?;
|
||||
match &mut stmt {
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::Role;
|
||||
use meta_client::client::{MetaClient, MetaClientBuilder};
|
||||
use meta_srv::mocks::MockInfo;
|
||||
use storage::compaction::noop::NoopCompactionScheduler;
|
||||
@@ -42,7 +43,7 @@ async fn mock_meta_client(mock_info: MockInfo, node_id: u64) -> MetaClient {
|
||||
} = mock_info;
|
||||
|
||||
let id = (1000u64, 2000u64);
|
||||
-    let mut meta_client = MetaClientBuilder::new(id.0, node_id)
+    let mut meta_client = MetaClientBuilder::new(id.0, node_id, Role::Datanode)
|
||||
.enable_heartbeat()
|
||||
.enable_router()
|
||||
.enable_store()
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow::datatypes::{DataType as ArrowDataType, TimeUnit as ArrowTimeUnit};
|
||||
@@ -62,6 +63,32 @@ pub enum ConcreteDataType {
|
||||
Dictionary(DictionaryType),
|
||||
}
|
||||
|
||||
impl fmt::Display for ConcreteDataType {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
ConcreteDataType::Null(_) => write!(f, "Null"),
|
||||
ConcreteDataType::Boolean(_) => write!(f, "Boolean"),
|
||||
ConcreteDataType::Int8(_) => write!(f, "Int8"),
|
||||
ConcreteDataType::Int16(_) => write!(f, "Int16"),
|
||||
ConcreteDataType::Int32(_) => write!(f, "Int32"),
|
||||
ConcreteDataType::Int64(_) => write!(f, "Int64"),
|
||||
ConcreteDataType::UInt8(_) => write!(f, "UInt8"),
|
||||
ConcreteDataType::UInt16(_) => write!(f, "UInt16"),
|
||||
ConcreteDataType::UInt32(_) => write!(f, "UInt32"),
|
||||
ConcreteDataType::UInt64(_) => write!(f, "UInt64"),
|
||||
ConcreteDataType::Float32(_) => write!(f, "Float32"),
|
||||
ConcreteDataType::Float64(_) => write!(f, "Float64"),
|
||||
ConcreteDataType::Binary(_) => write!(f, "Binary"),
|
||||
ConcreteDataType::String(_) => write!(f, "String"),
|
||||
ConcreteDataType::Date(_) => write!(f, "Date"),
|
||||
ConcreteDataType::DateTime(_) => write!(f, "DateTime"),
|
||||
ConcreteDataType::Timestamp(_) => write!(f, "Timestamp"),
|
||||
ConcreteDataType::List(_) => write!(f, "List"),
|
||||
ConcreteDataType::Dictionary(_) => write!(f, "Dictionary"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(yingwen): Refactor these `is_xxx()` methods, such as adding a `properties()` method
|
||||
// returning all these properties to the `DataType` trait
|
||||
impl ConcreteDataType {
|
||||
@@ -514,4 +541,81 @@ mod tests {
|
||||
);
|
||||
assert!(ConcreteDataType::int32_datatype().as_list().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_display_concrete_data_type() {
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::Null).to_string(),
|
||||
"Null"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::Boolean).to_string(),
|
||||
"Boolean"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::Binary).to_string(),
|
||||
"Binary"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::LargeBinary).to_string(),
|
||||
"Binary"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::Int8).to_string(),
|
||||
"Int8"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::Int16).to_string(),
|
||||
"Int16"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::Int32).to_string(),
|
||||
"Int32"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::Int64).to_string(),
|
||||
"Int64"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::UInt8).to_string(),
|
||||
"UInt8"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::UInt16).to_string(),
|
||||
"UInt16"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::UInt32).to_string(),
|
||||
"UInt32"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::UInt64).to_string(),
|
||||
"UInt64"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::Float32).to_string(),
|
||||
"Float32"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::Float64).to_string(),
|
||||
"Float64"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::Utf8).to_string(),
|
||||
"String"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::List(Arc::new(Field::new(
|
||||
"item",
|
||||
ArrowDataType::Int32,
|
||||
true,
|
||||
))))
|
||||
.to_string(),
|
||||
"List"
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::from_arrow_type(&ArrowDataType::Date32).to_string(),
|
||||
"Date"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@@ -180,6 +180,15 @@ impl Value {
        }
    }

    /// Cast Value to timestamp. Return None if value is not a valid timestamp data type.
    pub fn as_timestamp(&self) -> Option<Timestamp> {
        match self {
            Value::Int64(v) => Some(Timestamp::new_millisecond(*v)),
            Value::Timestamp(t) => Some(*t),
            _ => None,
        }
    }

    /// Returns the logical type of the value.
    pub fn logical_type_id(&self) -> LogicalTypeId {
        match self {
@@ -59,6 +59,7 @@ async fn test_open_table() {
        table_name: test_util::TEST_TABLE_NAME.to_string(),
        // the test table id is 1
        table_id: 1,
        region_numbers: vec![0],
    };

    let table_ref = TableReference {
@@ -7,6 +7,7 @@ license.workspace = true
[features]
default = ["python"]
python = ["dep:script"]
testing = []

[dependencies]
api = { path = "../api" }
@@ -66,8 +67,6 @@ common-test-util = { path = "../common/test-util" }
datanode = { path = "../datanode" }
futures = "0.3"
meta-srv = { path = "../meta-srv", features = ["mock"] }
rstest = "0.17"
rstest_reuse = "0.5"
strfmt = "0.2"
toml = "0.5"
tower = "0.4"

@@ -75,19 +75,19 @@ impl FrontendCatalogManager {
        }
    }

    pub(crate) fn set_dist_instance(&mut self, dist_instance: Arc<DistInstance>) {
    pub fn set_dist_instance(&mut self, dist_instance: Arc<DistInstance>) {
        self.dist_instance = Some(dist_instance)
    }

    pub(crate) fn backend(&self) -> KvBackendRef {
    pub fn backend(&self) -> KvBackendRef {
        self.backend.clone()
    }

    pub(crate) fn partition_manager(&self) -> PartitionRuleManagerRef {
    pub fn partition_manager(&self) -> PartitionRuleManagerRef {
        self.partition_manager.clone()
    }

    pub(crate) fn datanode_clients(&self) -> Arc<DatanodeClients> {
    pub fn datanode_clients(&self) -> Arc<DatanodeClients> {
        self.datanode_clients.clone()
    }
}
||||
@@ -406,71 +406,3 @@ impl SchemaProvider for FrontendSchemaProvider {
|
||||
Ok(self.table_names().await?.contains(&name.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
|
||||
use script::table::{build_scripts_schema, SCRIPTS_TABLE_NAME};
|
||||
use table::requests::{CreateTableRequest, TableOptions};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_register_system_table() {
|
||||
let instance =
|
||||
crate::tests::create_distributed_instance("test_register_system_table").await;
|
||||
|
||||
let catalog_name = DEFAULT_CATALOG_NAME;
|
||||
let schema_name = DEFAULT_SCHEMA_NAME;
|
||||
let table_name = SCRIPTS_TABLE_NAME;
|
||||
let request = CreateTableRequest {
|
||||
id: 1,
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
desc: Some("Scripts table".to_string()),
|
||||
schema: build_scripts_schema(),
|
||||
region_numbers: vec![0],
|
||||
primary_key_indices: vec![0, 1],
|
||||
create_if_not_exists: true,
|
||||
table_options: TableOptions::default(),
|
||||
engine: MITO_ENGINE.to_string(),
|
||||
};
|
||||
|
||||
let result = instance
|
||||
.catalog_manager
|
||||
.register_system_table(RegisterSystemTableRequest {
|
||||
create_table_request: request,
|
||||
open_hook: None,
|
||||
})
|
||||
.await;
|
||||
assert!(result.is_ok());
|
||||
|
||||
assert!(
|
||||
instance
|
||||
.catalog_manager
|
||||
.table(catalog_name, schema_name, table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some(),
|
||||
"the registered system table cannot be found in catalog"
|
||||
);
|
||||
|
||||
let mut actually_created_table_in_datanode = 0;
|
||||
for datanode in instance.datanodes.values() {
|
||||
if datanode
|
||||
.catalog_manager()
|
||||
.table(catalog_name, schema_name, table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some()
|
||||
{
|
||||
actually_created_table_in_datanode += 1;
|
||||
}
|
||||
}
|
||||
assert_eq!(
|
||||
actually_created_table_in_datanode, 1,
|
||||
"system table should be actually created at one and only one datanode"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
@@ -16,12 +16,14 @@ use std::time::Duration;

use client::Client;
use common_grpc::channel_manager::ChannelManager;
use common_telemetry::info;
use meta_client::rpc::Peer;
use moka::future::{Cache, CacheBuilder};

pub struct DatanodeClients {
    channel_manager: ChannelManager,
    clients: Cache<Peer, Client>,
    started: bool,
}

impl Default for DatanodeClients {
@@ -32,11 +34,23 @@ impl Default for DatanodeClients {
                .time_to_live(Duration::from_secs(30 * 60))
                .time_to_idle(Duration::from_secs(5 * 60))
                .build(),
            started: false,
        }
    }
}

impl DatanodeClients {
    pub(crate) fn start(&mut self) {
        if self.started {
            return;
        }

        self.channel_manager.start_channel_recycle();

        info!("Datanode clients manager is started!");
        self.started = true;
    }

    pub(crate) async fn get_client(&self, datanode: &Peer) -> Client {
        self.clients
            .get_with_by_ref(datanode, async move {
@@ -48,8 +62,8 @@ impl DatanodeClients {
            .await
    }

    #[cfg(test)]
    pub(crate) async fn insert_client(&self, datanode: Peer, client: Client) {
    #[cfg(feature = "testing")]
    pub async fn insert_client(&self, datanode: Peer, client: Client) {
        self.clients.insert(datanode, client).await
    }
}

@@ -170,7 +170,13 @@ pub enum Error {
        source: meta_client::error::Error,
    },

    #[snafu(display("Failed to request Meta, source: {}", source))]
    #[snafu(display("Failed to create heartbeat stream to Metasrv, source: {}", source))]
    CreateMetaHeartbeatStream {
        source: meta_client::error::Error,
        location: Location,
    },

    #[snafu(display("Failed to request Metasrv, source: {}", source))]
    RequestMeta {
        #[snafu(backtrace)]
        source: meta_client::error::Error,
@@ -620,9 +626,10 @@ impl ErrorExt for Error {
            Error::Catalog { source, .. } => source.status_code(),
            Error::CatalogEntrySerde { source, .. } => source.status_code(),

            Error::StartMetaClient { source } | Error::RequestMeta { source } => {
                source.status_code()
            }
            Error::StartMetaClient { source }
            | Error::RequestMeta { source }
            | Error::CreateMetaHeartbeatStream { source, .. } => source.status_code(),

            Error::BuildCreateExprOnInsertion { source }
            | Error::ToTableInsertRequest { source }
            | Error::ToTableDeleteRequest { source }

@@ -16,7 +16,11 @@ use std::collections::HashMap;
use std::sync::Arc;

use api::helper::ColumnDataTypeWrapper;
use api::v1::{Column, ColumnDataType, CreateTableExpr};
use api::v1::alter_expr::Kind;
use api::v1::{
    AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, CreateTableExpr, DropColumn,
    DropColumns, RenameTable,
};
use common_error::prelude::BoxedError;
use datanode::instance::sql::table_idents_to_full_name;
use datatypes::schema::ColumnSchema;
@@ -25,15 +29,16 @@ use query::sql::prepare_immutable_file_table_files_and_schema;
use session::context::QueryContextRef;
use snafu::{ensure, ResultExt};
use sql::ast::{ColumnDef, ColumnOption, TableConstraint};
use sql::statements::column_def_to_schema;
use sql::statements::alter::{AlterTable, AlterTableOperation};
use sql::statements::create::{CreateExternalTable, CreateTable, TIME_INDEX};
use sql::statements::{column_def_to_schema, sql_column_def_to_grpc_column_def};
use sql::util::to_lowercase_options_map;
use table::requests::{TableOptions, IMMUTABLE_TABLE_META_KEY};

use crate::error::{
    self, BuildCreateExprOnInsertionSnafu, ColumnDataTypeSnafu,
    ConvertColumnDefaultConstraintSnafu, IllegalPrimaryKeysDefSnafu, InvalidSqlSnafu,
    ParseSqlSnafu, Result,
    ConvertColumnDefaultConstraintSnafu, ExternalSnafu, IllegalPrimaryKeysDefSnafu,
    InvalidSqlSnafu, ParseSqlSnafu, Result,
};

pub type CreateExprFactoryRef = Arc<dyn CreateExprFactory + Send + Sync>;
@@ -117,10 +122,7 @@ pub(crate) async fn create_external_expr(
}

/// Convert `CreateTable` statement to `CreateExpr` gRPC request.
pub(crate) fn create_to_expr(
    create: &CreateTable,
    query_ctx: QueryContextRef,
) -> Result<CreateTableExpr> {
pub fn create_to_expr(create: &CreateTable, query_ctx: QueryContextRef) -> Result<CreateTableExpr> {
    let (catalog_name, schema_name, table_name) =
        table_idents_to_full_name(&create.name, query_ctx)
            .map_err(BoxedError::new)
@@ -273,6 +275,50 @@ pub(crate) fn column_schemas_to_defs(
        .collect()
}

pub(crate) fn to_alter_expr(
    alter_table: AlterTable,
    query_ctx: QueryContextRef,
) -> Result<AlterExpr> {
    let (catalog_name, schema_name, table_name) =
        table_idents_to_full_name(alter_table.table_name(), query_ctx)
            .map_err(BoxedError::new)
            .context(ExternalSnafu)?;

    let kind = match alter_table.alter_operation() {
        AlterTableOperation::AddConstraint(_) => {
            return error::NotSupportedSnafu {
                feat: "ADD CONSTRAINT",
            }
            .fail();
        }
        AlterTableOperation::AddColumn { column_def } => Kind::AddColumns(AddColumns {
            add_columns: vec![AddColumn {
                column_def: Some(
                    sql_column_def_to_grpc_column_def(column_def)
                        .map_err(BoxedError::new)
                        .context(ExternalSnafu)?,
                ),
                is_key: false,
            }],
        }),
        AlterTableOperation::DropColumn { name } => Kind::DropColumns(DropColumns {
            drop_columns: vec![DropColumn {
                name: name.value.to_string(),
            }],
        }),
        AlterTableOperation::RenameTable { new_table_name } => Kind::RenameTable(RenameTable {
            new_table_name: new_table_name.to_string(),
        }),
    };

    Ok(AlterExpr {
        catalog_name,
        schema_name,
        table_name,
        kind: Some(kind),
    })
}

#[cfg(test)]
mod tests {
    use session::context::QueryContext;

@@ -18,13 +18,10 @@ use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::Mode;

use crate::grpc::GrpcOptions;
use crate::influxdb::InfluxdbOptions;
use crate::mysql::MysqlOptions;
use crate::opentsdb::OpentsdbOptions;
use crate::postgres::PostgresOptions;
use crate::prom::PromOptions;
use crate::prometheus::PrometheusOptions;
use crate::service_config::{
    GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromOptions,
    PrometheusOptions,
};

#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
@@ -60,6 +57,12 @@ impl Default for FrontendOptions {
    }
}

impl FrontendOptions {
    pub fn env_list_keys() -> Option<&'static [&'static str]> {
        Some(&["meta_client_options.metasrv_addrs"])
    }
}

#[cfg(test)]
mod tests {
    use super::*;

116
src/frontend/src/heartbeat.rs
Normal file
@@ -0,0 +1,116 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::time::Duration;

use api::v1::meta::{HeartbeatRequest, HeartbeatResponse};
use common_telemetry::tracing::trace;
use common_telemetry::{error, info};
use meta_client::client::{HeartbeatSender, HeartbeatStream, MetaClient};
use snafu::ResultExt;

use crate::error;
use crate::error::Result;

#[derive(Clone)]
pub struct HeartbeatTask {
    meta_client: Arc<MetaClient>,
    report_interval: u64,
    retry_interval: u64,
}

impl HeartbeatTask {
    pub fn new(meta_client: Arc<MetaClient>, report_interval: u64, retry_interval: u64) -> Self {
        HeartbeatTask {
            meta_client,
            report_interval,
            retry_interval,
        }
    }

    pub async fn start(&self) -> Result<()> {
        let (req_sender, resp_stream) = self
            .meta_client
            .heartbeat()
            .await
            .context(error::CreateMetaHeartbeatStreamSnafu)?;

        info!("A heartbeat connection has been established with metasrv");

        self.start_handle_resp_stream(resp_stream);

        self.start_heartbeat_report(req_sender);

        Ok(())
    }

    fn start_handle_resp_stream(&self, mut resp_stream: HeartbeatStream) {
        let capture_self = self.clone();
        let retry_interval = self.retry_interval;

        common_runtime::spawn_bg(async move {
            loop {
                match resp_stream.message().await {
                    Ok(Some(resp)) => capture_self.handle_response(resp).await,
                    Ok(None) => break,
                    Err(e) => {
                        error!(e; "Occur error while reading heartbeat response");

                        capture_self
                            .start_with_retry(Duration::from_secs(retry_interval))
                            .await;

                        break;
                    }
                }
            }
        });
    }

    fn start_heartbeat_report(&self, req_sender: HeartbeatSender) {
        let report_interval = self.report_interval;

        common_runtime::spawn_bg(async move {
            loop {
                let req = HeartbeatRequest::default();

                if let Err(e) = req_sender.send(req.clone()).await {
                    error!(e; "Failed to send heartbeat to metasrv");
                    break;
                } else {
                    trace!("Send a heartbeat request to metasrv, content: {:?}", req);
                }

                tokio::time::sleep(Duration::from_secs(report_interval)).await;
            }
        });
    }

    async fn handle_response(&self, resp: HeartbeatResponse) {
        trace!("Received a heartbeat response: {:?}", resp);
    }

    async fn start_with_retry(&self, retry_interval: Duration) {
        loop {
            tokio::time::sleep(retry_interval).await;

            info!("Try to re-establish the heartbeat connection to metasrv.");

            if self.start().await.is_ok() {
                break;
            }
        }
    }
}
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub(crate) mod distributed;
|
||||
pub mod distributed;
|
||||
mod grpc;
|
||||
mod influxdb;
|
||||
mod opentsdb;
|
||||
@@ -27,6 +27,7 @@ use std::time::Duration;
|
||||
use api::v1::alter_expr::Kind;
|
||||
use api::v1::ddl_request::Expr as DdlExpr;
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::meta::Role;
|
||||
use api::v1::{AddColumns, AlterExpr, Column, DdlRequest, InsertRequest};
|
||||
use async_trait::async_trait;
|
||||
use catalog::remote::MetaKvBackend;
|
||||
@@ -74,6 +75,7 @@ use crate::error::{
|
||||
};
|
||||
use crate::expr_factory::{CreateExprFactoryRef, DefaultCreateExprFactory};
|
||||
use crate::frontend::FrontendOptions;
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::instance::standalone::StandaloneGrpcQueryHandler;
|
||||
use crate::metrics;
|
||||
use crate::script::ScriptExecutor;
|
||||
@@ -114,6 +116,8 @@ pub struct Instance {
|
||||
plugins: Arc<Plugins>,
|
||||
|
||||
servers: Arc<ServerHandlers>,
|
||||
|
||||
heartbeat_task: Option<HeartbeatTask>,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
@@ -128,13 +132,16 @@ impl Instance {
|
||||
});
|
||||
let table_routes = Arc::new(TableRoutes::new(meta_client.clone()));
|
||||
let partition_manager = Arc::new(PartitionRuleManager::new(table_routes));
|
||||
let datanode_clients = Arc::new(DatanodeClients::default());
|
||||
|
||||
let mut datanode_clients = DatanodeClients::default();
|
||||
datanode_clients.start();
|
||||
let datanode_clients = Arc::new(datanode_clients);
|
||||
|
||||
let mut catalog_manager =
|
||||
FrontendCatalogManager::new(meta_backend, partition_manager, datanode_clients.clone());
|
||||
|
||||
let dist_instance = DistInstance::new(
|
||||
meta_client,
|
||||
meta_client.clone(),
|
||||
Arc::new(catalog_manager.clone()),
|
||||
datanode_clients,
|
||||
);
|
||||
@@ -158,6 +165,8 @@ impl Instance {
|
||||
|
||||
plugins.insert::<StatementExecutorRef>(statement_executor.clone());
|
||||
|
||||
let heartbeat_task = Some(HeartbeatTask::new(meta_client, 5, 5));
|
||||
|
||||
Ok(Instance {
|
||||
catalog_manager,
|
||||
script_executor,
|
||||
@@ -167,6 +176,7 @@ impl Instance {
|
||||
grpc_query_handler: dist_instance,
|
||||
plugins: plugins.clone(),
|
||||
servers: Arc::new(HashMap::new()),
|
||||
heartbeat_task,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -186,11 +196,14 @@ impl Instance {
|
||||
.timeout(Duration::from_millis(meta_config.timeout_millis))
|
||||
.connect_timeout(Duration::from_millis(meta_config.connect_timeout_millis))
|
||||
.tcp_nodelay(meta_config.tcp_nodelay);
|
||||
let channel_manager = ChannelManager::with_config(channel_config);
|
||||
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0)
|
||||
let mut channel_manager = ChannelManager::with_config(channel_config);
|
||||
channel_manager.start_channel_recycle();
|
||||
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0, Role::Frontend)
|
||||
.enable_router()
|
||||
.enable_store()
|
||||
.enable_heartbeat()
|
||||
.channel_manager(channel_manager)
|
||||
.build();
|
||||
meta_client
|
||||
@@ -221,6 +234,7 @@ impl Instance {
|
||||
grpc_query_handler: StandaloneGrpcQueryHandler::arc(dn_instance.clone()),
|
||||
plugins: Default::default(),
|
||||
servers: Arc::new(HashMap::new()),
|
||||
heartbeat_task: None,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -231,8 +245,7 @@ impl Instance {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) async fn new_distributed(
|
||||
pub async fn new_distributed(
|
||||
catalog_manager: CatalogManagerRef,
|
||||
dist_instance: Arc<DistInstance>,
|
||||
) -> Self {
|
||||
@@ -258,6 +271,7 @@ impl Instance {
|
||||
grpc_query_handler: dist_instance,
|
||||
plugins: Default::default(),
|
||||
servers: Arc::new(HashMap::new()),
|
||||
heartbeat_task: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -418,8 +432,7 @@ impl Instance {
|
||||
.map(|_| ())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn statement_executor(&self) -> Arc<StatementExecutor> {
|
||||
pub fn statement_executor(&self) -> Arc<StatementExecutor> {
|
||||
self.statement_executor.clone()
|
||||
}
|
||||
}
|
||||
@@ -429,6 +442,10 @@ impl FrontendInstance for Instance {
|
||||
async fn start(&mut self) -> Result<()> {
|
||||
// TODO(hl): Frontend init should move to here
|
||||
|
||||
if let Some(heartbeat_task) = &self.heartbeat_task {
|
||||
heartbeat_task.start().await?;
|
||||
}
|
||||
|
||||
futures::future::try_join_all(self.servers.values().map(start_server))
|
||||
.await
|
||||
.context(error::StartServerSnafu)
|
||||
@@ -646,13 +663,9 @@ fn validate_insert_request(schema: &Schema, request: &InsertRequest) -> Result<(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::borrow::Cow;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::AtomicU32;
|
||||
|
||||
use api::v1::column::Values;
|
||||
use catalog::helper::{TableGlobalKey, TableGlobalValue};
|
||||
use common_recordbatch::RecordBatches;
|
||||
use datatypes::prelude::{ConcreteDataType, Value};
|
||||
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
|
||||
use query::query_engine::options::QueryOptions;
|
||||
@@ -660,9 +673,6 @@ mod tests {
|
||||
use strfmt::Format;
|
||||
|
||||
use super::*;
|
||||
use crate::table::DistTable;
|
||||
use crate::tests;
|
||||
use crate::tests::MockDistributedInstance;
|
||||
|
||||
#[test]
|
||||
fn test_validate_insert_request() {
|
||||
@@ -837,372 +847,4 @@ mod tests {
|
||||
let sql = "DESC TABLE {catalog}{schema}demo;";
|
||||
replace_test(sql, plugins, &query_ctx);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_standalone_exec_sql() {
|
||||
let standalone = tests::create_standalone_instance("test_standalone_exec_sql").await;
|
||||
let instance = standalone.instance.as_ref();
|
||||
|
||||
let sql = r#"
|
||||
CREATE TABLE demo(
|
||||
host STRING,
|
||||
ts TIMESTAMP,
|
||||
cpu DOUBLE NULL,
|
||||
memory DOUBLE NULL,
|
||||
disk_util DOUBLE DEFAULT 9.9,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(host)
|
||||
) engine=mito"#;
|
||||
create_table(instance, sql).await;
|
||||
|
||||
insert_and_query(instance).await;
|
||||
|
||||
drop_table(instance).await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_distributed_exec_sql() {
|
||||
let distributed = tests::create_distributed_instance("test_distributed_exec_sql").await;
|
||||
let instance = distributed.frontend.as_ref();
|
||||
|
||||
let sql = r#"
|
||||
CREATE TABLE demo(
|
||||
host STRING,
|
||||
ts TIMESTAMP,
|
||||
cpu DOUBLE NULL,
|
||||
memory DOUBLE NULL,
|
||||
disk_util DOUBLE DEFAULT 9.9,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(host)
|
||||
)
|
||||
PARTITION BY RANGE COLUMNS (host) (
|
||||
PARTITION r0 VALUES LESS THAN ('550-A'),
|
||||
PARTITION r1 VALUES LESS THAN ('550-W'),
|
||||
PARTITION r2 VALUES LESS THAN ('MOSS'),
|
||||
PARTITION r3 VALUES LESS THAN (MAXVALUE),
|
||||
)
|
||||
engine=mito"#;
|
||||
create_table(instance, sql).await;
|
||||
|
||||
insert_and_query(instance).await;
|
||||
|
||||
verify_data_distribution(
|
||||
&distributed,
|
||||
HashMap::from([
|
||||
(
|
||||
0u32,
|
||||
"\
|
||||
+---------------------+------+
|
||||
| ts | host |
|
||||
+---------------------+------+
|
||||
| 2013-12-31T16:00:00 | 490 |
|
||||
+---------------------+------+",
|
||||
),
|
||||
(
|
||||
1u32,
|
||||
"\
|
||||
+---------------------+-------+
|
||||
| ts | host |
|
||||
+---------------------+-------+
|
||||
| 2022-12-31T16:00:00 | 550-A |
|
||||
+---------------------+-------+",
|
||||
),
|
||||
(
|
||||
2u32,
|
||||
"\
|
||||
+---------------------+-------+
|
||||
| ts | host |
|
||||
+---------------------+-------+
|
||||
| 2023-12-31T16:00:00 | 550-W |
|
||||
+---------------------+-------+",
|
||||
),
|
||||
(
|
||||
3u32,
|
||||
"\
|
||||
+---------------------+------+
|
||||
| ts | host |
|
||||
+---------------------+------+
|
||||
| 2043-12-31T16:00:00 | MOSS |
|
||||
+---------------------+------+",
|
||||
),
|
||||
]),
|
||||
)
|
||||
.await;
|
||||
|
||||
drop_table(instance).await;
|
||||
|
||||
verify_table_is_dropped(&distributed).await;
|
||||
}
|
||||
|
||||
async fn query(instance: &Instance, sql: &str) -> Output {
|
||||
SqlQueryHandler::do_query(instance, sql, QueryContext::arc())
|
||||
.await
|
||||
.remove(0)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
async fn create_table(instance: &Instance, sql: &str) {
|
||||
let output = query(instance, sql).await;
|
||||
let Output::AffectedRows(x) = output else { unreachable!() };
|
||||
assert_eq!(x, 0);
|
||||
}
|
||||
|
||||
async fn insert_and_query(instance: &Instance) {
|
||||
let sql = r#"INSERT INTO demo(host, cpu, memory, ts) VALUES
|
||||
('490', 0.1, 1, 1388505600000),
|
||||
('550-A', 1, 100, 1672502400000),
|
||||
('550-W', 10000, 1000000, 1704038400000),
|
||||
('MOSS', 100000000, 10000000000, 2335190400000)
|
||||
"#;
|
||||
let output = query(instance, sql).await;
|
||||
let Output::AffectedRows(x) = output else { unreachable!() };
|
||||
assert_eq!(x, 4);
|
||||
|
||||
let sql = "SELECT * FROM demo WHERE ts > cast(1000000000 as timestamp) ORDER BY host"; // use nanoseconds as where condition
|
||||
let output = query(instance, sql).await;
|
||||
let Output::Stream(s) = output else { unreachable!() };
|
||||
let batches = common_recordbatch::util::collect_batches(s).await.unwrap();
|
||||
let pretty_print = batches.pretty_print().unwrap();
|
||||
let expected = "\
|
||||
+-------+---------------------+-------------+-----------+-----------+
|
||||
| host | ts | cpu | memory | disk_util |
|
||||
+-------+---------------------+-------------+-----------+-----------+
|
||||
| 490 | 2013-12-31T16:00:00 | 0.1 | 1.0 | 9.9 |
|
||||
| 550-A | 2022-12-31T16:00:00 | 1.0 | 100.0 | 9.9 |
|
||||
| 550-W | 2023-12-31T16:00:00 | 10000.0 | 1000000.0 | 9.9 |
|
||||
| MOSS | 2043-12-31T16:00:00 | 100000000.0 | 1.0e10 | 9.9 |
|
||||
+-------+---------------------+-------------+-----------+-----------+";
|
||||
assert_eq!(pretty_print, expected);
|
||||
}
|
||||
|
||||
async fn verify_data_distribution(
|
||||
instance: &MockDistributedInstance,
|
||||
expected_distribution: HashMap<u32, &str>,
|
||||
) {
|
||||
let table = instance
|
||||
.frontend
|
||||
.catalog_manager()
|
||||
.table("greptime", "public", "demo")
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let table = table.as_any().downcast_ref::<DistTable>().unwrap();
|
||||
|
||||
let TableGlobalValue { regions_id_map, .. } = table
|
||||
.table_global_value(&TableGlobalKey {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: "demo".to_string(),
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let region_to_dn_map = regions_id_map
|
||||
.iter()
|
||||
.map(|(k, v)| (v[0], *k))
|
||||
.collect::<HashMap<u32, u64>>();
|
||||
assert_eq!(region_to_dn_map.len(), expected_distribution.len());
|
||||
|
||||
let stmt = QueryLanguageParser::parse_sql("SELECT ts, host FROM demo ORDER BY ts").unwrap();
|
||||
for (region, dn) in region_to_dn_map.iter() {
|
||||
let dn = instance.datanodes.get(dn).unwrap();
|
||||
let engine = dn.query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt.clone(), QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
let output = engine.execute(plan, QueryContext::arc()).await.unwrap();
|
||||
let Output::Stream(stream) = output else { unreachable!() };
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
let actual = recordbatches.pretty_print().unwrap();
|
||||
|
||||
let expected = expected_distribution.get(region).unwrap();
|
||||
assert_eq!(&actual, expected);
|
||||
}
|
||||
}
|
||||
|
||||
async fn drop_table(instance: &Instance) {
|
||||
let sql = "DROP TABLE demo";
|
||||
let output = query(instance, sql).await;
|
||||
let Output::AffectedRows(x) = output else { unreachable!() };
|
||||
assert_eq!(x, 1);
|
||||
}
|
||||
|
||||
async fn verify_table_is_dropped(instance: &MockDistributedInstance) {
|
||||
for (_, dn) in instance.datanodes.iter() {
|
||||
assert!(dn
|
||||
.catalog_manager()
|
||||
.table("greptime", "public", "demo")
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none())
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_sql_interceptor_plugin() {
|
||||
#[derive(Default)]
|
||||
struct AssertionHook {
|
||||
pub(crate) c: AtomicU32,
|
||||
}
|
||||
|
||||
impl SqlQueryInterceptor for AssertionHook {
|
||||
type Error = Error;
|
||||
|
||||
fn pre_parsing<'a>(
|
||||
&self,
|
||||
query: &'a str,
|
||||
_query_ctx: QueryContextRef,
|
||||
) -> Result<Cow<'a, str>> {
|
||||
self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
assert!(query.starts_with("CREATE TABLE demo"));
|
||||
Ok(Cow::Borrowed(query))
|
||||
}
|
||||
|
||||
fn post_parsing(
|
||||
&self,
|
||||
statements: Vec<Statement>,
|
||||
_query_ctx: QueryContextRef,
|
||||
) -> Result<Vec<Statement>> {
|
||||
self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
assert!(matches!(statements[0], Statement::CreateTable(_)));
|
||||
Ok(statements)
|
||||
}
|
||||
|
||||
fn pre_execute(
|
||||
&self,
|
||||
_statement: &Statement,
|
||||
_plan: Option<&query::plan::LogicalPlan>,
|
||||
_query_ctx: QueryContextRef,
|
||||
) -> Result<()> {
|
||||
self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn post_execute(
|
||||
&self,
|
||||
mut output: Output,
|
||||
_query_ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
match &mut output {
|
||||
Output::AffectedRows(rows) => {
|
||||
assert_eq!(*rows, 0);
|
||||
// update output result
|
||||
*rows = 10;
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
Ok(output)
|
||||
}
|
||||
}
|
||||
|
||||
let standalone = tests::create_standalone_instance("test_hook").await;
|
||||
let mut instance = standalone.instance;
|
||||
|
||||
let plugins = Plugins::new();
|
||||
let counter_hook = Arc::new(AssertionHook::default());
|
||||
plugins.insert::<SqlQueryInterceptorRef<Error>>(counter_hook.clone());
|
||||
Arc::make_mut(&mut instance).set_plugins(Arc::new(plugins));
|
||||
|
||||
let sql = r#"CREATE TABLE demo(
|
||||
host STRING,
|
||||
ts TIMESTAMP,
|
||||
cpu DOUBLE NULL,
|
||||
memory DOUBLE NULL,
|
||||
disk_util DOUBLE DEFAULT 9.9,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(host)
|
||||
) engine=mito with(regions=1);"#;
|
||||
let output = SqlQueryHandler::do_query(&*instance, sql, QueryContext::arc())
|
||||
.await
|
||||
.remove(0)
|
||||
.unwrap();
|
||||
|
||||
// assert that the hook is called 3 times
|
||||
assert_eq!(4, counter_hook.c.load(std::sync::atomic::Ordering::Relaxed));
|
||||
match output {
|
||||
Output::AffectedRows(rows) => assert_eq!(rows, 10),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_disable_db_operation_plugin() {
|
||||
#[derive(Default)]
|
||||
struct DisableDBOpHook;
|
||||
|
||||
impl SqlQueryInterceptor for DisableDBOpHook {
|
||||
type Error = Error;
|
||||
|
||||
fn post_parsing(
|
||||
&self,
|
||||
statements: Vec<Statement>,
|
||||
_query_ctx: QueryContextRef,
|
||||
) -> Result<Vec<Statement>> {
|
||||
for s in &statements {
|
||||
match s {
|
||||
Statement::CreateDatabase(_) | Statement::ShowDatabases(_) => {
|
||||
return Err(Error::NotSupported {
|
||||
feat: "Database operations".to_owned(),
|
||||
})
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(statements)
|
||||
}
|
||||
}
|
||||
|
||||
let query_ctx = Arc::new(QueryContext::new());
|
||||
|
||||
let standalone = tests::create_standalone_instance("test_db_hook").await;
|
||||
let mut instance = standalone.instance;
|
||||
|
||||
let plugins = Plugins::new();
|
||||
let hook = Arc::new(DisableDBOpHook::default());
|
||||
plugins.insert::<SqlQueryInterceptorRef<Error>>(hook.clone());
|
||||
Arc::make_mut(&mut instance).set_plugins(Arc::new(plugins));
|
||||
|
||||
let sql = r#"CREATE TABLE demo(
|
||||
host STRING,
|
||||
ts TIMESTAMP,
|
||||
cpu DOUBLE NULL,
|
||||
memory DOUBLE NULL,
|
||||
disk_util DOUBLE DEFAULT 9.9,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(host)
|
||||
) engine=mito with(regions=1);"#;
|
||||
let output = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
|
||||
.await
|
||||
.remove(0)
|
||||
.unwrap();
|
||||
|
||||
match output {
|
||||
Output::AffectedRows(rows) => assert_eq!(rows, 0),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
let sql = r#"CREATE DATABASE tomcat"#;
|
||||
if let Err(e) = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
|
||||
.await
|
||||
.remove(0)
|
||||
{
|
||||
assert!(matches!(e, error::Error::NotSupported { .. }));
|
||||
} else {
|
||||
unreachable!();
|
||||
}
|
||||
|
||||
let sql = r#"SELECT 1; SHOW DATABASES"#;
|
||||
if let Err(e) = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
|
||||
.await
|
||||
.remove(0)
|
||||
{
|
||||
assert!(matches!(e, error::Error::NotSupported { .. }));
|
||||
} else {
|
||||
unreachable!();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,12 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod grpc;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::ddl_request::Expr as DdlExpr;
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::{
|
||||
column_def, AlterExpr, CreateDatabaseExpr, CreateTableExpr, DeleteRequest, DropTableExpr,
|
||||
FlushTableExpr, InsertRequest, TableId,
|
||||
@@ -46,6 +46,7 @@ use partition::manager::PartitionInfo;
|
||||
use partition::partition::{PartitionBound, PartitionDef};
|
||||
use query::error::QueryExecutionSnafu;
|
||||
use query::query_engine::SqlStatementExecutor;
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use sql::ast::{Ident, Value as SqlValue};
|
||||
@@ -73,14 +74,14 @@ use crate::table::DistTable;
|
||||
const MAX_VALUE: &str = "MAXVALUE";
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct DistInstance {
|
||||
pub struct DistInstance {
|
||||
meta_client: Arc<MetaClient>,
|
||||
catalog_manager: Arc<FrontendCatalogManager>,
|
||||
datanode_clients: Arc<DatanodeClients>,
|
||||
}
|
||||
|
||||
impl DistInstance {
|
||||
pub(crate) fn new(
|
||||
pub fn new(
|
||||
meta_client: Arc<MetaClient>,
|
||||
catalog_manager: Arc<FrontendCatalogManager>,
|
||||
datanode_clients: Arc<DatanodeClients>,
|
||||
@@ -92,7 +93,7 @@ impl DistInstance {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn create_table(
|
||||
pub async fn create_table(
|
||||
&self,
|
||||
create_table: &mut CreateTableExpr,
|
||||
partitions: Option<Partitions>,
|
||||
@@ -340,7 +341,7 @@ impl DistInstance {
|
||||
Ok(Output::AffectedRows(0))
|
||||
}
|
||||
Statement::Alter(alter_table) => {
|
||||
let expr = grpc::to_alter_expr(alter_table, query_ctx)?;
|
||||
let expr = expr_factory::to_alter_expr(alter_table, query_ctx)?;
|
||||
self.handle_alter_table(expr).await
|
||||
}
|
||||
Statement::DropTable(stmt) => {
|
||||
@@ -579,8 +580,7 @@ impl DistInstance {
|
||||
Ok(Output::AffectedRows(affected_rows))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn catalog_manager(&self) -> Arc<FrontendCatalogManager> {
|
||||
pub fn catalog_manager(&self) -> Arc<FrontendCatalogManager> {
|
||||
self.catalog_manager.clone()
|
||||
}
|
||||
}
|
||||
@@ -599,6 +599,46 @@ impl SqlStatementExecutor for DistInstance {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl GrpcQueryHandler for DistInstance {
|
||||
type Error = error::Error;
|
||||
|
||||
async fn do_query(&self, request: Request, ctx: QueryContextRef) -> Result<Output> {
|
||||
match request {
|
||||
Request::Insert(request) => self.handle_dist_insert(request, ctx).await,
|
||||
Request::Delete(request) => self.handle_dist_delete(request, ctx).await,
|
||||
Request::Query(_) => {
|
||||
unreachable!("Query should have been handled directly in Frontend Instance!")
|
||||
}
|
||||
Request::Ddl(request) => {
|
||||
let expr = request.expr.context(error::IncompleteGrpcResultSnafu {
|
||||
err_msg: "Missing 'expr' in DDL request",
|
||||
})?;
|
||||
match expr {
|
||||
DdlExpr::CreateDatabase(expr) => self.handle_create_database(expr, ctx).await,
|
||||
DdlExpr::CreateTable(mut expr) => {
|
||||
// TODO(LFC): Support creating distributed table through GRPC interface.
|
||||
// Currently only SQL supports it; how to design the fields in CreateTableExpr?
|
||||
let _ = self.create_table(&mut expr, None).await;
|
||||
Ok(Output::AffectedRows(0))
|
||||
}
|
||||
DdlExpr::Alter(expr) => self.handle_alter_table(expr).await,
|
||||
DdlExpr::DropTable(expr) => {
|
||||
let table_name =
|
||||
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
|
||||
self.drop_table(table_name).await
|
||||
}
|
||||
DdlExpr::FlushTable(expr) => {
|
||||
let table_name =
|
||||
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
|
||||
self.flush_table(table_name, expr.region_id).await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn create_partitions_stmt(partitions: Vec<PartitionInfo>) -> Result<Option<Partitions>> {
|
||||
if partitions.is_empty() {
|
||||
return Ok(None);
|
||||
|
||||
@@ -1,115 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use alter_expr::Kind;
|
||||
use api::v1::ddl_request::Expr as DdlExpr;
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::{alter_expr, AddColumn, AddColumns, AlterExpr, DropColumn, DropColumns, RenameTable};
|
||||
use async_trait::async_trait;
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_query::Output;
|
||||
use datanode::instance::sql::table_idents_to_full_name;
|
||||
use meta_client::rpc::TableName;
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use sql::statements::alter::{AlterTable, AlterTableOperation};
|
||||
use sql::statements::sql_column_def_to_grpc_column_def;
|
||||
|
||||
use crate::error::{self, ExternalSnafu, Result};
|
||||
use crate::instance::distributed::DistInstance;
|
||||
|
||||
#[async_trait]
|
||||
impl GrpcQueryHandler for DistInstance {
|
||||
type Error = error::Error;
|
||||
|
||||
async fn do_query(&self, request: Request, ctx: QueryContextRef) -> Result<Output> {
|
||||
match request {
|
||||
Request::Insert(request) => self.handle_dist_insert(request, ctx).await,
|
||||
Request::Delete(request) => self.handle_dist_delete(request, ctx).await,
|
||||
Request::Query(_) => {
|
||||
unreachable!("Query should have been handled directly in Frontend Instance!")
|
||||
}
|
||||
Request::Ddl(request) => {
|
||||
let expr = request.expr.context(error::IncompleteGrpcResultSnafu {
|
||||
err_msg: "Missing 'expr' in DDL request",
|
||||
})?;
|
||||
match expr {
|
||||
DdlExpr::CreateDatabase(expr) => self.handle_create_database(expr, ctx).await,
|
||||
DdlExpr::CreateTable(mut expr) => {
|
||||
// TODO(LFC): Support creating distributed table through GRPC interface.
|
||||
// Currently only SQL supports it; how to design the fields in CreateTableExpr?
|
||||
let _ = self.create_table(&mut expr, None).await;
|
||||
Ok(Output::AffectedRows(0))
|
||||
}
|
||||
DdlExpr::Alter(expr) => self.handle_alter_table(expr).await,
|
||||
DdlExpr::DropTable(expr) => {
|
||||
let table_name =
|
||||
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
|
||||
self.drop_table(table_name).await
|
||||
}
|
||||
DdlExpr::FlushTable(expr) => {
|
||||
let table_name =
|
||||
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
|
||||
self.flush_table(table_name, expr.region_id).await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn to_alter_expr(
|
||||
alter_table: AlterTable,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<AlterExpr> {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(alter_table.table_name(), query_ctx)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
|
||||
let kind = match alter_table.alter_operation() {
|
||||
AlterTableOperation::AddConstraint(_) => {
|
||||
return error::NotSupportedSnafu {
|
||||
feat: "ADD CONSTRAINT",
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
AlterTableOperation::AddColumn { column_def } => Kind::AddColumns(AddColumns {
|
||||
add_columns: vec![AddColumn {
|
||||
column_def: Some(
|
||||
sql_column_def_to_grpc_column_def(column_def)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?,
|
||||
),
|
||||
is_key: false,
|
||||
}],
|
||||
}),
|
||||
AlterTableOperation::DropColumn { name } => Kind::DropColumns(DropColumns {
|
||||
drop_columns: vec![DropColumn {
|
||||
name: name.value.to_string(),
|
||||
}],
|
||||
}),
|
||||
AlterTableOperation::RenameTable { new_table_name } => Kind::RenameTable(RenameTable {
|
||||
new_table_name: new_table_name.to_string(),
|
||||
}),
|
||||
};
|
||||
|
||||
Ok(AlterExpr {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
kind: Some(kind),
|
||||
})
|
||||
}
|
||||
@@ -81,845 +81,3 @@ impl GrpcQueryHandler for Instance {
|
||||
Ok(output)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::collections::HashMap;
|
||||
|
||||
use api::v1::column::{SemanticType, Values};
|
||||
use api::v1::ddl_request::Expr as DdlExpr;
|
||||
use api::v1::{
|
||||
alter_expr, AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, ColumnDef,
|
||||
CreateDatabaseExpr, CreateTableExpr, DdlRequest, DeleteRequest, DropTableExpr,
|
||||
FlushTableExpr, InsertRequest, QueryRequest,
|
||||
};
|
||||
use catalog::helper::{TableGlobalKey, TableGlobalValue};
|
||||
use common_catalog::consts::MITO_ENGINE;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use session::context::QueryContext;
|
||||
use tests::{has_parquet_file, test_region_dir};
|
||||
|
||||
use super::*;
|
||||
use crate::table::DistTable;
|
||||
use crate::tests;
|
||||
use crate::tests::MockDistributedInstance;
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_distributed_handle_ddl_request() {
|
||||
let instance =
|
||||
tests::create_distributed_instance("test_distributed_handle_ddl_request").await;
|
||||
let frontend = &instance.frontend;
|
||||
|
||||
test_handle_ddl_request(frontend.as_ref()).await;
|
||||
|
||||
verify_table_is_dropped(&instance).await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_standalone_handle_ddl_request() {
|
||||
let standalone =
|
||||
tests::create_standalone_instance("test_standalone_handle_ddl_request").await;
|
||||
let instance = &standalone.instance;
|
||||
|
||||
test_handle_ddl_request(instance.as_ref()).await;
|
||||
}
|
||||
|
||||
async fn query(instance: &Instance, request: Request) -> Output {
|
||||
GrpcQueryHandler::do_query(instance, request, QueryContext::arc())
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
async fn test_handle_ddl_request(instance: &Instance) {
|
||||
let request = Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::CreateDatabase(CreateDatabaseExpr {
|
||||
database_name: "database_created_through_grpc".to_string(),
|
||||
create_if_not_exists: true,
|
||||
})),
|
||||
});
|
||||
let output = query(instance, request).await;
|
||||
assert!(matches!(output, Output::AffectedRows(1)));
|
||||
|
||||
let request = Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::CreateTable(CreateTableExpr {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "database_created_through_grpc".to_string(),
|
||||
table_name: "table_created_through_grpc".to_string(),
|
||||
column_defs: vec![
|
||||
ColumnDef {
|
||||
name: "a".to_string(),
|
||||
datatype: ColumnDataType::String as _,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
},
|
||||
ColumnDef {
|
||||
name: "ts".to_string(),
|
||||
datatype: ColumnDataType::TimestampMillisecond as _,
|
||||
is_nullable: false,
|
||||
default_constraint: vec![],
|
||||
},
|
||||
],
|
||||
time_index: "ts".to_string(),
|
||||
engine: MITO_ENGINE.to_string(),
|
||||
..Default::default()
|
||||
})),
|
||||
});
|
||||
let output = query(instance, request).await;
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
|
||||
let request = Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::Alter(AlterExpr {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "database_created_through_grpc".to_string(),
|
||||
table_name: "table_created_through_grpc".to_string(),
|
||||
kind: Some(alter_expr::Kind::AddColumns(AddColumns {
|
||||
add_columns: vec![AddColumn {
|
||||
column_def: Some(ColumnDef {
|
||||
name: "b".to_string(),
|
||||
datatype: ColumnDataType::Int32 as _,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
}),
|
||||
is_key: false,
|
||||
}],
|
||||
})),
|
||||
})),
|
||||
});
|
||||
let output = query(instance, request).await;
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
|
||||
let request = Request::Query(QueryRequest {
|
||||
query: Some(Query::Sql("INSERT INTO database_created_through_grpc.table_created_through_grpc (a, b, ts) VALUES ('s', 1, 1672816466000)".to_string()))
|
||||
});
|
||||
let output = query(instance, request).await;
|
||||
assert!(matches!(output, Output::AffectedRows(1)));
|
||||
|
||||
let request = Request::Query(QueryRequest {
|
||||
query: Some(Query::Sql(
|
||||
"SELECT ts, a, b FROM database_created_through_grpc.table_created_through_grpc"
|
||||
.to_string(),
|
||||
)),
|
||||
});
|
||||
let output = query(instance, request).await;
|
||||
let Output::Stream(stream) = output else { unreachable!() };
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
let expected = "\
|
||||
+---------------------+---+---+
|
||||
| ts | a | b |
|
||||
+---------------------+---+---+
|
||||
| 2023-01-04T07:14:26 | s | 1 |
|
||||
+---------------------+---+---+";
|
||||
assert_eq!(recordbatches.pretty_print().unwrap(), expected);
|
||||
|
||||
let request = Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::DropTable(DropTableExpr {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "database_created_through_grpc".to_string(),
|
||||
table_name: "table_created_through_grpc".to_string(),
|
||||
})),
|
||||
});
|
||||
let output = query(instance, request).await;
|
||||
assert!(matches!(output, Output::AffectedRows(1)));
|
||||
}
|
||||
|
||||
async fn verify_table_is_dropped(instance: &MockDistributedInstance) {
|
||||
for (_, dn) in instance.datanodes.iter() {
|
||||
assert!(dn
|
||||
.catalog_manager()
|
||||
.table(
|
||||
"greptime",
|
||||
"database_created_through_grpc",
|
||||
"table_created_through_grpc"
|
||||
)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_distributed_insert_delete_and_query() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let instance =
|
||||
tests::create_distributed_instance("test_distributed_insert_delete_and_query").await;
|
||||
let frontend = instance.frontend.as_ref();
|
||||
|
||||
let table_name = "my_dist_table";
|
||||
let sql = format!(
|
||||
r"
|
||||
CREATE TABLE {table_name} (
|
||||
a INT,
|
||||
b STRING PRIMARY KEY,
|
||||
ts TIMESTAMP,
|
||||
TIME INDEX (ts)
|
||||
) PARTITION BY RANGE COLUMNS(a) (
|
||||
PARTITION r0 VALUES LESS THAN (10),
|
||||
PARTITION r1 VALUES LESS THAN (20),
|
||||
PARTITION r2 VALUES LESS THAN (50),
|
||||
PARTITION r3 VALUES LESS THAN (MAXVALUE),
|
||||
)"
|
||||
);
|
||||
create_table(frontend, sql).await;
|
||||
|
||||
test_insert_delete_and_query_on_existing_table(frontend, table_name).await;
|
||||
|
||||
verify_data_distribution(
|
||||
&instance,
|
||||
table_name,
|
||||
HashMap::from([
|
||||
(
|
||||
0u32,
|
||||
"\
|
||||
+---------------------+---+-------------------+
|
||||
| ts | a | b |
|
||||
+---------------------+---+-------------------+
|
||||
| 2023-01-01T07:26:12 | 1 | ts: 1672557972000 |
|
||||
| 2023-01-01T07:26:14 | 3 | ts: 1672557974000 |
|
||||
| 2023-01-01T07:26:15 | 4 | ts: 1672557975000 |
|
||||
| 2023-01-01T07:26:16 | 5 | ts: 1672557976000 |
|
||||
| 2023-01-01T07:26:17 | | ts: 1672557977000 |
|
||||
+---------------------+---+-------------------+",
|
||||
),
|
||||
(
|
||||
1u32,
|
||||
"\
|
||||
+---------------------+----+-------------------+
|
||||
| ts | a | b |
|
||||
+---------------------+----+-------------------+
|
||||
| 2023-01-01T07:26:18 | 11 | ts: 1672557978000 |
|
||||
+---------------------+----+-------------------+",
|
||||
),
|
||||
(
|
||||
2u32,
|
||||
"\
|
||||
+---------------------+----+-------------------+
|
||||
| ts | a | b |
|
||||
+---------------------+----+-------------------+
|
||||
| 2023-01-01T07:26:20 | 20 | ts: 1672557980000 |
|
||||
| 2023-01-01T07:26:21 | 21 | ts: 1672557981000 |
|
||||
| 2023-01-01T07:26:23 | 23 | ts: 1672557983000 |
|
||||
+---------------------+----+-------------------+",
|
||||
),
|
||||
(
|
||||
3u32,
|
||||
"\
|
||||
+---------------------+----+-------------------+
|
||||
| ts | a | b |
|
||||
+---------------------+----+-------------------+
|
||||
| 2023-01-01T07:26:24 | 50 | ts: 1672557984000 |
|
||||
| 2023-01-01T07:26:25 | 51 | ts: 1672557985000 |
|
||||
| 2023-01-01T07:26:27 | 53 | ts: 1672557987000 |
|
||||
+---------------------+----+-------------------+",
|
||||
),
|
||||
]),
|
||||
)
|
||||
.await;
|
||||
|
||||
test_insert_delete_and_query_on_auto_created_table(frontend).await;
|
||||
|
||||
// Auto created table has only one region.
|
||||
verify_data_distribution(
|
||||
&instance,
|
||||
"auto_created_table",
|
||||
HashMap::from([(
|
||||
0u32,
|
||||
"\
|
||||
+---------------------+---+---+
|
||||
| ts | a | b |
|
||||
+---------------------+---+---+
|
||||
| 2023-01-01T07:26:16 | | |
|
||||
| 2023-01-01T07:26:17 | 6 | |
|
||||
| 2023-01-01T07:26:18 | | x |
|
||||
| 2023-01-01T07:26:20 | | z |
|
||||
+---------------------+---+---+",
|
||||
)]),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_standalone_insert_and_query() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let standalone =
|
||||
tests::create_standalone_instance("test_standalone_insert_and_query").await;
|
||||
let instance = &standalone.instance;
|
||||
|
||||
let table_name = "my_table";
|
||||
let sql = format!("CREATE TABLE {table_name} (a INT, b STRING, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY (a, b))");
|
||||
create_table(instance, sql).await;
|
||||
|
||||
test_insert_delete_and_query_on_existing_table(instance, table_name).await;
|
||||
|
||||
test_insert_delete_and_query_on_auto_created_table(instance).await
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_distributed_flush_table() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let instance = tests::create_distributed_instance("test_distributed_flush_table").await;
|
||||
let data_tmp_dirs = instance.data_tmp_dirs();
|
||||
let frontend = instance.frontend.as_ref();
|
||||
|
||||
let table_name = "my_dist_table";
|
||||
let sql = format!(
|
||||
r"
|
||||
CREATE TABLE {table_name} (
|
||||
a INT,
|
||||
ts TIMESTAMP,
|
||||
TIME INDEX (ts)
|
||||
) PARTITION BY RANGE COLUMNS(a) (
|
||||
PARTITION r0 VALUES LESS THAN (10),
|
||||
PARTITION r1 VALUES LESS THAN (20),
|
||||
PARTITION r2 VALUES LESS THAN (50),
|
||||
PARTITION r3 VALUES LESS THAN (MAXVALUE),
|
||||
)"
|
||||
);
|
||||
create_table(frontend, sql).await;
|
||||
|
||||
test_insert_delete_and_query_on_existing_table(frontend, table_name).await;
|
||||
|
||||
flush_table(frontend, "greptime", "public", table_name, None).await;
|
||||
// Wait for previous task finished
|
||||
flush_table(frontend, "greptime", "public", table_name, None).await;
|
||||
|
||||
let table = instance
|
||||
.frontend
|
||||
.catalog_manager()
|
||||
.table("greptime", "public", table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let table = table.as_any().downcast_ref::<DistTable>().unwrap();
|
||||
|
||||
let tgv = table
|
||||
.table_global_value(&TableGlobalKey {
|
||||
catalog_name: "greptime".to_string(),
|
||||
schema_name: "public".to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let table_id = tgv.table_id();
|
||||
|
||||
let region_to_dn_map = tgv
|
||||
.regions_id_map
|
||||
.iter()
|
||||
.map(|(k, v)| (v[0], *k))
|
||||
.collect::<HashMap<u32, u64>>();
|
||||
|
||||
for (region, dn) in region_to_dn_map.iter() {
|
||||
// data_tmp_dirs -> dn: 1..4
|
||||
let data_tmp_dir = data_tmp_dirs.get((*dn - 1) as usize).unwrap();
|
||||
let region_dir = test_region_dir(
|
||||
data_tmp_dir.path().to_str().unwrap(),
|
||||
"greptime",
|
||||
"public",
|
||||
table_id,
|
||||
*region,
|
||||
);
|
||||
has_parquet_file(®ion_dir);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_standalone_flush_table() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let standalone = tests::create_standalone_instance("test_standalone_flush_table").await;
|
||||
let instance = &standalone.instance;
|
||||
let data_tmp_dir = standalone.data_tmp_dir();
|
||||
|
||||
let table_name = "my_table";
|
||||
let sql = format!("CREATE TABLE {table_name} (a INT, b STRING, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY (a, b))");
|
||||
|
||||
create_table(instance, sql).await;
|
||||
|
||||
test_insert_delete_and_query_on_existing_table(instance, table_name).await;
|
||||
|
||||
let table_id = 1024;
|
||||
let region_id = 0;
|
||||
let region_dir = test_region_dir(
|
||||
data_tmp_dir.path().to_str().unwrap(),
|
||||
"greptime",
|
||||
"public",
|
||||
table_id,
|
||||
region_id,
|
||||
);
|
||||
assert!(!has_parquet_file(®ion_dir));
|
||||
|
||||
flush_table(instance, "greptime", "public", "my_table", None).await;
|
||||
// Wait for previous task finished
|
||||
flush_table(instance, "greptime", "public", "my_table", None).await;
|
||||
|
||||
assert!(has_parquet_file(®ion_dir));
|
||||
}
|
||||
|
||||
async fn create_table(frontend: &Instance, sql: String) {
|
||||
let request = Request::Query(QueryRequest {
|
||||
query: Some(Query::Sql(sql)),
|
||||
});
|
||||
let output = query(frontend, request).await;
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
}
|
||||
|
||||
async fn flush_table(
|
||||
frontend: &Instance,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
region_id: Option<u32>,
|
||||
) {
|
||||
let request = Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::FlushTable(FlushTableExpr {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
region_id,
|
||||
})),
|
||||
});
|
||||
|
||||
let output = query(frontend, request).await;
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
}

    async fn test_insert_delete_and_query_on_existing_table(instance: &Instance, table_name: &str) {
        let ts_millisecond_values = vec![
            1672557972000,
            1672557973000,
            1672557974000,
            1672557975000,
            1672557976000,
            1672557977000,
            1672557978000,
            1672557979000,
            1672557980000,
            1672557981000,
            1672557982000,
            1672557983000,
            1672557984000,
            1672557985000,
            1672557986000,
            1672557987000,
        ];
        let insert = InsertRequest {
            table_name: table_name.to_string(),
            columns: vec![
                Column {
                    column_name: "a".to_string(),
                    values: Some(Values {
                        i32_values: vec![1, 2, 3, 4, 5, 11, 12, 20, 21, 22, 23, 50, 51, 52, 53],
                        ..Default::default()
                    }),
                    null_mask: vec![32, 0],
                    semantic_type: SemanticType::Field as i32,
                    datatype: ColumnDataType::Int32 as i32,
                },
                Column {
                    column_name: "b".to_string(),
                    values: Some(Values {
                        string_values: ts_millisecond_values
                            .iter()
                            .map(|x| format!("ts: {x}"))
                            .collect(),
                        ..Default::default()
                    }),
                    semantic_type: SemanticType::Tag as i32,
                    datatype: ColumnDataType::String as i32,
                    ..Default::default()
                },
                Column {
                    column_name: "ts".to_string(),
                    values: Some(Values {
                        ts_millisecond_values,
                        ..Default::default()
                    }),
                    semantic_type: SemanticType::Timestamp as i32,
                    datatype: ColumnDataType::TimestampMillisecond as i32,
                    ..Default::default()
                },
            ],
            row_count: 16,
            ..Default::default()
        };
        let output = query(instance, Request::Insert(insert)).await;
        assert!(matches!(output, Output::AffectedRows(16)));

        let request = Request::Query(QueryRequest {
            query: Some(Query::Sql(format!(
                "SELECT ts, a, b FROM {table_name} ORDER BY ts"
            ))),
        });
        let output = query(instance, request.clone()).await;
        let Output::Stream(stream) = output else { unreachable!() };
        let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
        let expected = "\
+---------------------+----+-------------------+
| ts | a | b |
+---------------------+----+-------------------+
| 2023-01-01T07:26:12 | 1 | ts: 1672557972000 |
| 2023-01-01T07:26:13 | 2 | ts: 1672557973000 |
| 2023-01-01T07:26:14 | 3 | ts: 1672557974000 |
| 2023-01-01T07:26:15 | 4 | ts: 1672557975000 |
| 2023-01-01T07:26:16 | 5 | ts: 1672557976000 |
| 2023-01-01T07:26:17 | | ts: 1672557977000 |
| 2023-01-01T07:26:18 | 11 | ts: 1672557978000 |
| 2023-01-01T07:26:19 | 12 | ts: 1672557979000 |
| 2023-01-01T07:26:20 | 20 | ts: 1672557980000 |
| 2023-01-01T07:26:21 | 21 | ts: 1672557981000 |
| 2023-01-01T07:26:22 | 22 | ts: 1672557982000 |
| 2023-01-01T07:26:23 | 23 | ts: 1672557983000 |
| 2023-01-01T07:26:24 | 50 | ts: 1672557984000 |
| 2023-01-01T07:26:25 | 51 | ts: 1672557985000 |
| 2023-01-01T07:26:26 | 52 | ts: 1672557986000 |
| 2023-01-01T07:26:27 | 53 | ts: 1672557987000 |
+---------------------+----+-------------------+";
        assert_eq!(recordbatches.pretty_print().unwrap(), expected);

        let delete = DeleteRequest {
            table_name: table_name.to_string(),
            region_number: 0,
            key_columns: vec![
                Column {
                    column_name: "a".to_string(),
                    semantic_type: SemanticType::Field as i32,
                    values: Some(Values {
                        i32_values: vec![2, 12, 22, 52],
                        ..Default::default()
                    }),
                    datatype: ColumnDataType::Int32 as i32,
                    ..Default::default()
                },
                Column {
                    column_name: "b".to_string(),
                    semantic_type: SemanticType::Tag as i32,
                    values: Some(Values {
                        string_values: vec![
                            "ts: 1672557973000".to_string(),
                            "ts: 1672557979000".to_string(),
                            "ts: 1672557982000".to_string(),
                            "ts: 1672557986000".to_string(),
                        ],
                        ..Default::default()
                    }),
                    datatype: ColumnDataType::String as i32,
                    ..Default::default()
                },
                Column {
                    column_name: "ts".to_string(),
                    semantic_type: SemanticType::Timestamp as i32,
                    values: Some(Values {
                        ts_millisecond_values: vec![
                            1672557973000,
                            1672557979000,
                            1672557982000,
                            1672557986000,
                        ],
                        ..Default::default()
                    }),
                    datatype: ColumnDataType::TimestampMillisecond as i32,
                    ..Default::default()
                },
            ],
            row_count: 4,
        };
        let output = query(instance, Request::Delete(delete)).await;
        assert!(matches!(output, Output::AffectedRows(4)));

        let output = query(instance, request).await;
        let Output::Stream(stream) = output else { unreachable!() };
        let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
        let expected = "\
+---------------------+----+-------------------+
| ts | a | b |
+---------------------+----+-------------------+
| 2023-01-01T07:26:12 | 1 | ts: 1672557972000 |
| 2023-01-01T07:26:14 | 3 | ts: 1672557974000 |
| 2023-01-01T07:26:15 | 4 | ts: 1672557975000 |
| 2023-01-01T07:26:16 | 5 | ts: 1672557976000 |
| 2023-01-01T07:26:17 | | ts: 1672557977000 |
| 2023-01-01T07:26:18 | 11 | ts: 1672557978000 |
| 2023-01-01T07:26:20 | 20 | ts: 1672557980000 |
| 2023-01-01T07:26:21 | 21 | ts: 1672557981000 |
| 2023-01-01T07:26:23 | 23 | ts: 1672557983000 |
| 2023-01-01T07:26:24 | 50 | ts: 1672557984000 |
| 2023-01-01T07:26:25 | 51 | ts: 1672557985000 |
| 2023-01-01T07:26:27 | 53 | ts: 1672557987000 |
+---------------------+----+-------------------+";
        assert_eq!(recordbatches.pretty_print().unwrap(), expected);
    }

    async fn verify_data_distribution(
        instance: &MockDistributedInstance,
        table_name: &str,
        expected_distribution: HashMap<u32, &str>,
    ) {
        let table = instance
            .frontend
            .catalog_manager()
            .table("greptime", "public", table_name)
            .await
            .unwrap()
            .unwrap();
        let table = table.as_any().downcast_ref::<DistTable>().unwrap();

        let TableGlobalValue { regions_id_map, .. } = table
            .table_global_value(&TableGlobalKey {
                catalog_name: "greptime".to_string(),
                schema_name: "public".to_string(),
                table_name: table_name.to_string(),
            })
            .await
            .unwrap()
            .unwrap();
        let region_to_dn_map = regions_id_map
            .iter()
            .map(|(k, v)| (v[0], *k))
            .collect::<HashMap<u32, u64>>();
        assert_eq!(region_to_dn_map.len(), expected_distribution.len());

        for (region, dn) in region_to_dn_map.iter() {
            let stmt = QueryLanguageParser::parse_sql(&format!(
                "SELECT ts, a, b FROM {table_name} ORDER BY ts"
            ))
            .unwrap();
            let dn = instance.datanodes.get(dn).unwrap();
            let engine = dn.query_engine();
            let plan = engine
                .planner()
                .plan(stmt, QueryContext::arc())
                .await
                .unwrap();
            let output = engine.execute(plan, QueryContext::arc()).await.unwrap();
            let Output::Stream(stream) = output else { unreachable!() };
            let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
            let actual = recordbatches.pretty_print().unwrap();

            let expected = expected_distribution.get(region).unwrap();
            assert_eq!(&actual, expected);
        }
    }

    async fn test_insert_delete_and_query_on_auto_created_table(instance: &Instance) {
        let insert = InsertRequest {
            table_name: "auto_created_table".to_string(),
            columns: vec![
                Column {
                    column_name: "a".to_string(),
                    values: Some(Values {
                        i32_values: vec![4, 6],
                        ..Default::default()
                    }),
                    null_mask: vec![2],
                    semantic_type: SemanticType::Field as i32,
                    datatype: ColumnDataType::Int32 as i32,
                },
                Column {
                    column_name: "ts".to_string(),
                    values: Some(Values {
                        ts_millisecond_values: vec![1672557975000, 1672557976000, 1672557977000],
                        ..Default::default()
                    }),
                    semantic_type: SemanticType::Timestamp as i32,
                    datatype: ColumnDataType::TimestampMillisecond as i32,
                    ..Default::default()
                },
            ],
            row_count: 3,
            ..Default::default()
        };

        // Test auto create not existed table upon insertion.
        let request = Request::Insert(insert);
        let output = query(instance, request).await;
        assert!(matches!(output, Output::AffectedRows(3)));

        let insert = InsertRequest {
            table_name: "auto_created_table".to_string(),
            columns: vec![
                Column {
                    column_name: "b".to_string(),
                    values: Some(Values {
                        string_values: vec!["x".to_string(), "z".to_string()],
                        ..Default::default()
                    }),
                    null_mask: vec![2],
                    semantic_type: SemanticType::Field as i32,
                    datatype: ColumnDataType::String as i32,
                },
                Column {
                    column_name: "ts".to_string(),
                    values: Some(Values {
                        ts_millisecond_values: vec![1672557978000, 1672557979000, 1672557980000],
                        ..Default::default()
                    }),
                    semantic_type: SemanticType::Timestamp as i32,
                    datatype: ColumnDataType::TimestampMillisecond as i32,
                    ..Default::default()
                },
            ],
            row_count: 3,
            ..Default::default()
        };

        // Test auto add not existed column upon insertion.
        let request = Request::Insert(insert);
        let output = query(instance, request).await;
        assert!(matches!(output, Output::AffectedRows(3)));

        let request = Request::Query(QueryRequest {
            query: Some(Query::Sql(
                "SELECT ts, a, b FROM auto_created_table".to_string(),
            )),
        });
        let output = query(instance, request.clone()).await;
        let Output::Stream(stream) = output else { unreachable!() };
        let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
        let expected = "\
+---------------------+---+---+
| ts | a | b |
+---------------------+---+---+
| 2023-01-01T07:26:15 | 4 | |
| 2023-01-01T07:26:16 | | |
| 2023-01-01T07:26:17 | 6 | |
| 2023-01-01T07:26:18 | | x |
| 2023-01-01T07:26:19 | | |
| 2023-01-01T07:26:20 | | z |
+---------------------+---+---+";
        assert_eq!(recordbatches.pretty_print().unwrap(), expected);

        let delete = DeleteRequest {
            table_name: "auto_created_table".to_string(),
            region_number: 0,
            key_columns: vec![Column {
                column_name: "ts".to_string(),
                values: Some(Values {
                    ts_millisecond_values: vec![1672557975000, 1672557979000],
                    ..Default::default()
                }),
                semantic_type: SemanticType::Timestamp as i32,
                datatype: ColumnDataType::TimestampMillisecond as i32,
                ..Default::default()
            }],
            row_count: 2,
        };

        let output = query(instance, Request::Delete(delete)).await;
        assert!(matches!(output, Output::AffectedRows(2)));

        let output = query(instance, request).await;
        let Output::Stream(stream) = output else { unreachable!() };
        let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
        let expected = "\
+---------------------+---+---+
| ts | a | b |
+---------------------+---+---+
| 2023-01-01T07:26:16 | | |
| 2023-01-01T07:26:17 | 6 | |
| 2023-01-01T07:26:18 | | x |
| 2023-01-01T07:26:20 | | z |
+---------------------+---+---+";
        assert_eq!(recordbatches.pretty_print().unwrap(), expected);
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_promql_query() {
        common_telemetry::init_default_ut_logging();

        let standalone = tests::create_standalone_instance("test_standalone_promql_query").await;
        let instance = &standalone.instance;

        let table_name = "my_table";
        let sql = format!("CREATE TABLE {table_name} (h string, a double, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY(h))");
        create_table(instance, sql).await;

        let insert = InsertRequest {
            table_name: table_name.to_string(),
            columns: vec![
                Column {
                    column_name: "h".to_string(),
                    values: Some(Values {
                        string_values: vec![
                            "t".to_string(),
                            "t".to_string(),
                            "t".to_string(),
                            "t".to_string(),
                            "t".to_string(),
                            "t".to_string(),
                            "t".to_string(),
                            "t".to_string(),
                        ],
                        ..Default::default()
                    }),
                    semantic_type: SemanticType::Tag as i32,
                    datatype: ColumnDataType::String as i32,
                    ..Default::default()
                },
                Column {
                    column_name: "a".to_string(),
                    values: Some(Values {
                        f64_values: vec![1f64, 11f64, 20f64, 22f64, 50f64, 55f64, 99f64],
                        ..Default::default()
                    }),
                    null_mask: vec![4],
                    semantic_type: SemanticType::Field as i32,
                    datatype: ColumnDataType::Float64 as i32,
                },
                Column {
                    column_name: "ts".to_string(),
                    values: Some(Values {
                        ts_millisecond_values: vec![
                            1672557972000,
                            1672557973000,
                            1672557974000,
                            1672557975000,
                            1672557976000,
                            1672557977000,
                            1672557978000,
                            1672557979000,
                        ],
                        ..Default::default()
                    }),
                    semantic_type: SemanticType::Timestamp as i32,
                    datatype: ColumnDataType::TimestampMillisecond as i32,
                    ..Default::default()
                },
            ],
            row_count: 8,
            ..Default::default()
        };

        let request = Request::Insert(insert);
        let output = query(instance, request).await;
        assert!(matches!(output, Output::AffectedRows(8)));

        let request = Request::Query(QueryRequest {
            query: Some(Query::PromRangeQuery(api::v1::PromRangeQuery {
                query: "my_table".to_owned(),
                start: "1672557973".to_owned(),
                end: "1672557978".to_owned(),
                step: "1s".to_owned(),
            })),
        });
        let output = query(instance, request).await;
        let Output::Stream(stream) = output else { unreachable!() };
        let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
        let expected = "\
+---+------+---------------------+
| h | a | ts |
+---+------+---------------------+
| t | 11.0 | 2023-01-01T07:26:13 |
| t | | 2023-01-01T07:26:14 |
| t | 20.0 | 2023-01-01T07:26:15 |
| t | 22.0 | 2023-01-01T07:26:16 |
| t | 50.0 | 2023-01-01T07:26:17 |
| t | 55.0 | 2023-01-01T07:26:18 |
+---+------+---------------------+";
        assert_eq!(recordbatches.pretty_print().unwrap(), expected);
    }
}

@@ -36,65 +36,3 @@ impl InfluxdbLineProtocolHandler for Instance {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
use crate::tests;
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_standalone_put_influxdb_lines() {
|
||||
let standalone =
|
||||
tests::create_standalone_instance("test_standalone_put_influxdb_lines").await;
|
||||
let instance = &standalone.instance;
|
||||
|
||||
test_put_influxdb_lines(instance).await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_distributed_put_influxdb_lines() {
|
||||
let instance =
|
||||
tests::create_distributed_instance("test_distributed_put_influxdb_lines").await;
|
||||
let instance = &instance.frontend;
|
||||
|
||||
test_put_influxdb_lines(instance).await;
|
||||
}
|
||||
|
||||
async fn test_put_influxdb_lines(instance: &Arc<Instance>) {
|
||||
let lines = r"
|
||||
monitor1,host=host1 cpu=66.6,memory=1024 1663840496100023100
|
||||
monitor1,host=host2 memory=1027 1663840496400340001";
|
||||
let request = InfluxdbRequest {
|
||||
precision: None,
|
||||
lines: lines.to_string(),
|
||||
};
|
||||
instance.exec(&request, QueryContext::arc()).await.unwrap();
|
||||
|
||||
let mut output = instance
|
||||
.do_query(
|
||||
"SELECT ts, host, cpu, memory FROM monitor1 ORDER BY ts",
|
||||
QueryContext::arc(),
|
||||
)
|
||||
.await;
|
||||
let output = output.remove(0).unwrap();
|
||||
let Output::Stream(stream) = output else { unreachable!() };
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
assert_eq!(
|
||||
recordbatches.pretty_print().unwrap(),
|
||||
"\
|
||||
+-------------------------+-------+------+--------+
|
||||
| ts | host | cpu | memory |
|
||||
+-------------------------+-------+------+--------+
|
||||
| 2022-09-22T09:54:56.100 | host1 | 66.6 | 1024.0 |
|
||||
| 2022-09-22T09:54:56.400 | host2 | | 1027.0 |
|
||||
+-------------------------+-------+------+--------+"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,95 +35,3 @@ impl OpentsdbProtocolHandler for Instance {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use itertools::Itertools;
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
use crate::tests;
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_standalone_exec() {
|
||||
let standalone = tests::create_standalone_instance("test_standalone_exec").await;
|
||||
let instance = &standalone.instance;
|
||||
|
||||
test_exec(instance).await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_distributed_exec() {
|
||||
let distributed = tests::create_distributed_instance("test_distributed_exec").await;
|
||||
let instance = &distributed.frontend;
|
||||
|
||||
test_exec(instance).await;
|
||||
}
|
||||
|
||||
async fn test_exec(instance: &Arc<Instance>) {
|
||||
let ctx = QueryContext::arc();
|
||||
let data_point1 = DataPoint::new(
|
||||
"my_metric_1".to_string(),
|
||||
1000,
|
||||
1.0,
|
||||
vec![
|
||||
("tagk1".to_string(), "tagv1".to_string()),
|
||||
("tagk2".to_string(), "tagv2".to_string()),
|
||||
],
|
||||
);
|
||||
// should create new table "my_metric_1" directly
|
||||
let result = instance.exec(&data_point1, ctx.clone()).await;
|
||||
assert!(result.is_ok());
|
||||
|
||||
let data_point2 = DataPoint::new(
|
||||
"my_metric_1".to_string(),
|
||||
2000,
|
||||
2.0,
|
||||
vec![
|
||||
("tagk2".to_string(), "tagv2".to_string()),
|
||||
("tagk3".to_string(), "tagv3".to_string()),
|
||||
],
|
||||
);
|
||||
// should create new column "tagk3" directly
|
||||
let result = instance.exec(&data_point2, ctx.clone()).await;
|
||||
assert!(result.is_ok());
|
||||
|
||||
let data_point3 = DataPoint::new("my_metric_1".to_string(), 3000, 3.0, vec![]);
|
||||
// should handle null tags properly
|
||||
let result = instance.exec(&data_point3, ctx.clone()).await;
|
||||
assert!(result.is_ok());
|
||||
|
||||
let output = instance
|
||||
.do_query(
|
||||
"select * from my_metric_1 order by greptime_timestamp",
|
||||
Arc::new(QueryContext::new()),
|
||||
)
|
||||
.await
|
||||
.remove(0)
|
||||
.unwrap();
|
||||
match output {
|
||||
Output::Stream(stream) => {
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
let pretty_print = recordbatches.pretty_print().unwrap();
|
||||
let expected = vec![
|
||||
"+---------------------+----------------+-------+-------+-------+",
|
||||
"| greptime_timestamp | greptime_value | tagk1 | tagk2 | tagk3 |",
|
||||
"+---------------------+----------------+-------+-------+-------+",
|
||||
"| 1970-01-01T00:00:01 | 1.0 | tagv1 | tagv2 | |",
|
||||
"| 1970-01-01T00:00:02 | 2.0 | | tagv2 | tagv3 |",
|
||||
"| 1970-01-01T00:00:03 | 3.0 | | | |",
|
||||
"+---------------------+----------------+-------+-------+-------+",
|
||||
]
|
||||
.into_iter()
|
||||
.join("\n");
|
||||
assert_eq!(pretty_print, expected);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,168 +153,3 @@ impl PrometheusProtocolHandler for Instance {
|
||||
todo!();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::prometheus::remote::label_matcher::Type as MatcherType;
|
||||
use api::prometheus::remote::{Label, LabelMatcher, Sample};
|
||||
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
use crate::tests;
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_standalone_prometheus_remote_rw() {
|
||||
let standalone =
|
||||
tests::create_standalone_instance("test_standalone_prometheus_remote_rw").await;
|
||||
let instance = &standalone.instance;
|
||||
|
||||
test_prometheus_remote_rw(instance).await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_distributed_prometheus_remote_rw() {
|
||||
let distributed =
|
||||
tests::create_distributed_instance("test_distributed_prometheus_remote_rw").await;
|
||||
let instance = &distributed.frontend;
|
||||
|
||||
test_prometheus_remote_rw(instance).await;
|
||||
}
|
||||
|
||||
async fn test_prometheus_remote_rw(instance: &Arc<Instance>) {
|
||||
let write_request = WriteRequest {
|
||||
timeseries: prometheus::mock_timeseries(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let db = "prometheus";
|
||||
let ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, db));
|
||||
|
||||
assert!(SqlQueryHandler::do_query(
|
||||
instance.as_ref(),
|
||||
"CREATE DATABASE IF NOT EXISTS prometheus",
|
||||
ctx.clone(),
|
||||
)
|
||||
.await
|
||||
.get(0)
|
||||
.unwrap()
|
||||
.is_ok());
|
||||
|
||||
instance.write(write_request, ctx.clone()).await.unwrap();
|
||||
|
||||
let read_request = ReadRequest {
|
||||
queries: vec![
|
||||
Query {
|
||||
start_timestamp_ms: 1000,
|
||||
end_timestamp_ms: 2000,
|
||||
matchers: vec![LabelMatcher {
|
||||
name: prometheus::METRIC_NAME_LABEL.to_string(),
|
||||
value: "metric1".to_string(),
|
||||
r#type: 0,
|
||||
}],
|
||||
..Default::default()
|
||||
},
|
||||
Query {
|
||||
start_timestamp_ms: 1000,
|
||||
end_timestamp_ms: 3000,
|
||||
matchers: vec![
|
||||
LabelMatcher {
|
||||
name: prometheus::METRIC_NAME_LABEL.to_string(),
|
||||
value: "metric3".to_string(),
|
||||
r#type: 0,
|
||||
},
|
||||
LabelMatcher {
|
||||
name: "app".to_string(),
|
||||
value: "biz".to_string(),
|
||||
r#type: MatcherType::Eq as i32,
|
||||
},
|
||||
],
|
||||
..Default::default()
|
||||
},
|
||||
],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let resp = instance.read(read_request, ctx).await.unwrap();
|
||||
assert_eq!(resp.content_type, "application/x-protobuf");
|
||||
assert_eq!(resp.content_encoding, "snappy");
|
||||
let body = prometheus::snappy_decompress(&resp.body).unwrap();
|
||||
let read_response = ReadResponse::decode(&body[..]).unwrap();
|
||||
let query_results = read_response.results;
|
||||
assert_eq!(2, query_results.len());
|
||||
|
||||
assert_eq!(1, query_results[0].timeseries.len());
|
||||
let timeseries = &query_results[0].timeseries[0];
|
||||
|
||||
assert_eq!(
|
||||
vec![
|
||||
Label {
|
||||
name: prometheus::METRIC_NAME_LABEL.to_string(),
|
||||
value: "metric1".to_string(),
|
||||
},
|
||||
Label {
|
||||
name: "job".to_string(),
|
||||
value: "spark".to_string(),
|
||||
},
|
||||
],
|
||||
timeseries.labels
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
timeseries.samples,
|
||||
vec![
|
||||
Sample {
|
||||
value: 1.0,
|
||||
timestamp: 1000,
|
||||
},
|
||||
Sample {
|
||||
value: 2.0,
|
||||
timestamp: 2000,
|
||||
}
|
||||
]
|
||||
);
|
||||
|
||||
assert_eq!(1, query_results[1].timeseries.len());
|
||||
let timeseries = &query_results[1].timeseries[0];
|
||||
|
||||
assert_eq!(
|
||||
vec![
|
||||
Label {
|
||||
name: prometheus::METRIC_NAME_LABEL.to_string(),
|
||||
value: "metric3".to_string(),
|
||||
},
|
||||
Label {
|
||||
name: "idc".to_string(),
|
||||
value: "z002".to_string(),
|
||||
},
|
||||
Label {
|
||||
name: "app".to_string(),
|
||||
value: "biz".to_string(),
|
||||
},
|
||||
],
|
||||
timeseries.labels
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
timeseries.samples,
|
||||
vec![
|
||||
Sample {
|
||||
value: 5.0,
|
||||
timestamp: 1000,
|
||||
},
|
||||
Sample {
|
||||
value: 6.0,
|
||||
timestamp: 2000,
|
||||
},
|
||||
Sample {
|
||||
value: 7.0,
|
||||
timestamp: 3000,
|
||||
}
|
||||
]
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,25 +18,13 @@
pub mod catalog;
pub mod datanode;
pub mod error;
mod expr_factory;
pub mod expr_factory;
pub mod frontend;
pub mod grpc;
pub mod influxdb;
pub mod heartbeat;
pub mod instance;
pub(crate) mod metrics;
pub mod mysql;
pub mod opentsdb;
pub mod postgres;
pub mod prom;
pub mod prometheus;
mod script;
mod server;
pub mod service_config;
pub mod statement;
mod table;
#[cfg(test)]
mod tests;

#[cfg(test)]
// allowed because https://docs.rs/rstest_reuse/0.5.0/rstest_reuse/#use-rstest_reuse-at-the-top-of-your-crate
#[allow(clippy::single_component_path_imports)]
use rstest_reuse;
pub mod table;

@@ -37,9 +37,8 @@ use snafu::ResultExt;
use crate::error::Error::StartServer;
use crate::error::{self, Result};
use crate::frontend::FrontendOptions;
use crate::influxdb::InfluxdbOptions;
use crate::instance::FrontendInstance;
use crate::prometheus::PrometheusOptions;
use crate::service_config::{InfluxdbOptions, PrometheusOptions};

pub(crate) struct Services;

src/frontend/src/service_config.rs (new file, 29 lines)
@@ -0,0 +1,29 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod grpc;
pub mod influxdb;
pub mod mysql;
pub mod opentsdb;
pub mod postgres;
pub mod prom;
pub mod prometheus;

pub use grpc::GrpcOptions;
pub use influxdb::InfluxdbOptions;
pub use mysql::MysqlOptions;
pub use opentsdb::OpentsdbOptions;
pub use postgres::PostgresOptions;
pub use prom::PromOptions;
pub use prometheus::PrometheusOptions;

@@ -59,7 +59,7 @@ impl StatementExecutor {
        }
    }

    pub(crate) async fn execute_stmt(
    pub async fn execute_stmt(
        &self,
        stmt: QueryStatement,
        query_ctx: QueryContextRef,

@@ -12,20 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use common_query::Output;
use query::parser::{PromQuery, QueryLanguageParser};
use query::parser::{PromQuery, QueryLanguageParser, ANALYZE_NODE_NAME, EXPLAIN_NODE_NAME};
use session::context::QueryContextRef;
use snafu::ResultExt;
use sql::statements::tql::Tql;

use crate::error::{
    ExecLogicalPlanSnafu, NotSupportedSnafu, ParseQuerySnafu, PlanStatementSnafu, Result,
};
use crate::error::{ExecLogicalPlanSnafu, ParseQuerySnafu, PlanStatementSnafu, Result};
use crate::statement::StatementExecutor;

impl StatementExecutor {
    pub(super) async fn execute_tql(&self, tql: Tql, query_ctx: QueryContextRef) -> Result<Output> {
        let plan = match tql {
        let stmt = match tql {
            Tql::Eval(eval) => {
                let promql = PromQuery {
                    start: eval.start,
@@ -33,20 +33,39 @@ impl StatementExecutor {
                    step: eval.step,
                    query: eval.query,
                };
                let stmt = QueryLanguageParser::parse_promql(&promql).context(ParseQuerySnafu)?;
                self.query_engine
                    .planner()
                    .plan(stmt, query_ctx.clone())
                    .await
                    .context(PlanStatementSnafu)?
                QueryLanguageParser::parse_promql(&promql).context(ParseQuerySnafu)?
            }
            Tql::Explain(_) => {
                return NotSupportedSnafu {
                    feat: "TQL EXPLAIN",
                }
                .fail()
            Tql::Explain(explain) => {
                let promql = PromQuery {
                    query: explain.query,
                    ..PromQuery::default()
                };
                let params = HashMap::from([("name".to_string(), EXPLAIN_NODE_NAME.to_string())]);
                QueryLanguageParser::parse_promql(&promql)
                    .context(ParseQuerySnafu)?
                    .post_process(params)
                    .unwrap()
            }
            Tql::Analyze(tql_analyze) => {
                let promql = PromQuery {
                    start: tql_analyze.start,
                    end: tql_analyze.end,
                    step: tql_analyze.step,
                    query: tql_analyze.query,
                };
                let params = HashMap::from([("name".to_string(), ANALYZE_NODE_NAME.to_string())]);
                QueryLanguageParser::parse_promql(&promql)
                    .context(ParseQuerySnafu)?
                    .post_process(params)
                    .unwrap()
            }
        };
        let plan = self
            .query_engine
            .planner()
            .plan(stmt, query_ctx.clone())
            .await
            .context(PlanStatementSnafu)?;
        self.query_engine
            .execute(plan, query_ctx)
            .await

@@ -217,7 +217,7 @@ impl Table for DistTable {
|
||||
}
|
||||
|
||||
impl DistTable {
|
||||
pub(crate) fn new(
|
||||
pub fn new(
|
||||
table_name: TableName,
|
||||
table_info: TableInfoRef,
|
||||
partition_manager: PartitionRuleManagerRef,
|
||||
@@ -233,7 +233,7 @@ impl DistTable {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn table_global_value(
|
||||
pub async fn table_global_value(
|
||||
&self,
|
||||
key: &TableGlobalKey,
|
||||
) -> Result<Option<TableGlobalValue>> {
|
||||
@@ -496,25 +496,10 @@ mod test {
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
|
||||
use api::v1::column::SemanticType;
|
||||
use api::v1::{column, Column, ColumnDataType, InsertRequest as GrpcInsertRequest};
|
||||
use catalog::error::Result;
|
||||
use catalog::remote::{KvBackend, ValueIter};
|
||||
use common_query::physical_plan::DfPhysicalPlanAdapter;
|
||||
use common_query::DfPhysicalPlan;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
|
||||
use datafusion::physical_plan::expressions::{col as physical_col, PhysicalSortExpr};
|
||||
use datafusion::physical_plan::sorts::sort::SortExec;
|
||||
use datafusion::prelude::SessionContext;
|
||||
use datafusion::sql::sqlparser;
|
||||
use datafusion_expr::expr_fn::{and, binary_expr, col, or};
|
||||
use datafusion_expr::{lit, Operator};
|
||||
use datanode::instance::Instance;
|
||||
use datatypes::arrow::compute::SortOptions;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, Schema};
|
||||
use itertools::Itertools;
|
||||
use meta_client::client::MetaClient;
|
||||
use meta_client::rpc::router::RegionRoute;
|
||||
use meta_client::rpc::{Region, Table, TableRoute};
|
||||
@@ -528,15 +513,10 @@ mod test {
|
||||
use partition::range::RangePartitionRule;
|
||||
use partition::route::TableRoutes;
|
||||
use partition::PartitionRuleRef;
|
||||
use session::context::QueryContext;
|
||||
use sql::parser::ParserContext;
|
||||
use sql::statements::statement::Statement;
|
||||
use store_api::storage::RegionNumber;
|
||||
use table::metadata::{TableInfoBuilder, TableMetaBuilder};
|
||||
use table::{meter_insert_request, TableRef};
|
||||
use table::meter_insert_request;
|
||||
|
||||
use super::*;
|
||||
use crate::expr_factory;
|
||||
|
||||
struct DummyKvBackend;
|
||||
|
||||
@@ -745,321 +725,6 @@ mod test {
|
||||
assert_eq!(range_columns_rule.regions(), &vec![1, 2, 3]);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_dist_table_scan() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let table = Arc::new(new_dist_table("test_dist_table_scan").await);
|
||||
// should scan all regions
|
||||
// select a, row_id from numbers
|
||||
let projection = Some(vec![1, 2]);
|
||||
let filters = vec![];
|
||||
let expected_output = vec![
|
||||
"+-----+--------+",
|
||||
"| a | row_id |",
|
||||
"+-----+--------+",
|
||||
"| 0 | 1 |",
|
||||
"| 1 | 2 |",
|
||||
"| 2 | 3 |",
|
||||
"| 3 | 4 |",
|
||||
"| 4 | 5 |",
|
||||
"| 10 | 1 |",
|
||||
"| 11 | 2 |",
|
||||
"| 12 | 3 |",
|
||||
"| 13 | 4 |",
|
||||
"| 14 | 5 |",
|
||||
"| 30 | 1 |",
|
||||
"| 31 | 2 |",
|
||||
"| 32 | 3 |",
|
||||
"| 33 | 4 |",
|
||||
"| 34 | 5 |",
|
||||
"| 100 | 1 |",
|
||||
"| 101 | 2 |",
|
||||
"| 102 | 3 |",
|
||||
"| 103 | 4 |",
|
||||
"| 104 | 5 |",
|
||||
"+-----+--------+",
|
||||
];
|
||||
exec_table_scan(table.clone(), projection, filters, 4, expected_output).await;
|
||||
|
||||
// should scan only region 1
|
||||
// select a, row_id from numbers where a < 10
|
||||
let projection = Some(vec![1, 2]);
|
||||
let filters = vec![binary_expr(col("a"), Operator::Lt, lit(10)).into()];
|
||||
let expected_output = vec![
|
||||
"+---+--------+",
|
||||
"| a | row_id |",
|
||||
"+---+--------+",
|
||||
"| 0 | 1 |",
|
||||
"| 1 | 2 |",
|
||||
"| 2 | 3 |",
|
||||
"| 3 | 4 |",
|
||||
"| 4 | 5 |",
|
||||
"+---+--------+",
|
||||
];
|
||||
exec_table_scan(table.clone(), projection, filters, 1, expected_output).await;
|
||||
|
||||
// should scan region 1 and 2
|
||||
// select a, row_id from numbers where a < 15
|
||||
let projection = Some(vec![1, 2]);
|
||||
let filters = vec![binary_expr(col("a"), Operator::Lt, lit(15)).into()];
|
||||
let expected_output = vec![
|
||||
"+----+--------+",
|
||||
"| a | row_id |",
|
||||
"+----+--------+",
|
||||
"| 0 | 1 |",
|
||||
"| 1 | 2 |",
|
||||
"| 2 | 3 |",
|
||||
"| 3 | 4 |",
|
||||
"| 4 | 5 |",
|
||||
"| 10 | 1 |",
|
||||
"| 11 | 2 |",
|
||||
"| 12 | 3 |",
|
||||
"| 13 | 4 |",
|
||||
"| 14 | 5 |",
|
||||
"+----+--------+",
|
||||
];
|
||||
exec_table_scan(table.clone(), projection, filters, 2, expected_output).await;
|
||||
|
||||
// should scan region 2 and 3
|
||||
// select a, row_id from numbers where a < 40 and a >= 10
|
||||
let projection = Some(vec![1, 2]);
|
||||
let filters = vec![and(
|
||||
binary_expr(col("a"), Operator::Lt, lit(40)),
|
||||
binary_expr(col("a"), Operator::GtEq, lit(10)),
|
||||
)
|
||||
.into()];
|
||||
let expected_output = vec![
|
||||
"+----+--------+",
|
||||
"| a | row_id |",
|
||||
"+----+--------+",
|
||||
"| 10 | 1 |",
|
||||
"| 11 | 2 |",
|
||||
"| 12 | 3 |",
|
||||
"| 13 | 4 |",
|
||||
"| 14 | 5 |",
|
||||
"| 30 | 1 |",
|
||||
"| 31 | 2 |",
|
||||
"| 32 | 3 |",
|
||||
"| 33 | 4 |",
|
||||
"| 34 | 5 |",
|
||||
"+----+--------+",
|
||||
];
|
||||
exec_table_scan(table.clone(), projection, filters, 2, expected_output).await;
|
||||
|
||||
// should scan all regions
|
||||
// select a, row_id from numbers where a < 1000 and row_id == 1
|
||||
let projection = Some(vec![1, 2]);
|
||||
let filters = vec![and(
|
||||
binary_expr(col("a"), Operator::Lt, lit(1000)),
|
||||
binary_expr(col("row_id"), Operator::Eq, lit(1)),
|
||||
)
|
||||
.into()];
|
||||
let expected_output = vec![
|
||||
"+-----+--------+",
|
||||
"| a | row_id |",
|
||||
"+-----+--------+",
|
||||
"| 0 | 1 |",
|
||||
"| 10 | 1 |",
|
||||
"| 30 | 1 |",
|
||||
"| 100 | 1 |",
|
||||
"+-----+--------+",
|
||||
];
|
||||
exec_table_scan(table.clone(), projection, filters, 4, expected_output).await;
|
||||
}
|
||||
|
||||
async fn exec_table_scan(
|
||||
table: TableRef,
|
||||
projection: Option<Vec<usize>>,
|
||||
filters: Vec<Expr>,
|
||||
expected_partitions: usize,
|
||||
expected_output: Vec<&str>,
|
||||
) {
|
||||
let expected_output = expected_output.into_iter().join("\n");
|
||||
let table_scan = table
|
||||
.scan(projection.as_ref(), filters.as_slice(), None)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
table_scan.output_partitioning().partition_count(),
|
||||
expected_partitions
|
||||
);
|
||||
|
||||
let merge =
|
||||
CoalescePartitionsExec::new(Arc::new(DfPhysicalPlanAdapter(table_scan.clone())));
|
||||
|
||||
let sort = SortExec::new(
|
||||
vec![PhysicalSortExpr {
|
||||
expr: physical_col("a", table_scan.schema().arrow_schema()).unwrap(),
|
||||
options: SortOptions::default(),
|
||||
}],
|
||||
Arc::new(merge),
|
||||
)
|
||||
.with_fetch(None);
|
||||
assert_eq!(sort.output_partitioning().partition_count(), 1);
|
||||
|
||||
let session_ctx = SessionContext::new();
|
||||
let stream = sort.execute(0, session_ctx.task_ctx()).unwrap();
|
||||
let stream = Box::pin(RecordBatchStreamAdapter::try_new(stream).unwrap());
|
||||
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
assert_eq!(recordbatches.pretty_print().unwrap(), expected_output);
|
||||
}
|
||||
|
||||
async fn new_dist_table(test_name: &str) -> DistTable {
|
||||
let column_schemas = vec![
|
||||
ColumnSchema::new("ts", ConcreteDataType::int64_datatype(), false),
|
||||
ColumnSchema::new("a", ConcreteDataType::int32_datatype(), true),
|
||||
ColumnSchema::new("row_id", ConcreteDataType::int32_datatype(), true),
|
||||
];
|
||||
let schema = Arc::new(Schema::new(column_schemas.clone()));
|
||||
|
||||
let instance = crate::tests::create_distributed_instance(test_name).await;
|
||||
let dist_instance = &instance.dist_instance;
|
||||
let datanode_instances = instance.datanodes;
|
||||
|
||||
let catalog_manager = dist_instance.catalog_manager();
|
||||
let partition_manager = catalog_manager.partition_manager();
|
||||
let datanode_clients = catalog_manager.datanode_clients();
|
||||
|
||||
let table_name = TableName::new("greptime", "public", "dist_numbers");
|
||||
|
||||
let sql = "
|
||||
CREATE TABLE greptime.public.dist_numbers (
|
||||
ts BIGINT,
|
||||
a INT,
|
||||
row_id INT,
|
||||
TIME INDEX (ts),
|
||||
)
|
||||
PARTITION BY RANGE COLUMNS (a) (
|
||||
PARTITION r0 VALUES LESS THAN (10),
|
||||
PARTITION r1 VALUES LESS THAN (20),
|
||||
PARTITION r2 VALUES LESS THAN (50),
|
||||
PARTITION r3 VALUES LESS THAN (MAXVALUE),
|
||||
)
|
||||
ENGINE=mito";
|
||||
|
||||
let create_table =
|
||||
match ParserContext::create_with_dialect(sql, &sqlparser::dialect::GenericDialect {})
|
||||
.unwrap()
|
||||
.pop()
|
||||
.unwrap()
|
||||
{
|
||||
Statement::CreateTable(c) => c,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let mut expr = expr_factory::create_to_expr(&create_table, QueryContext::arc()).unwrap();
|
||||
let _result = dist_instance
|
||||
.create_table(&mut expr, create_table.partitions)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let table_route = partition_manager
|
||||
.find_table_route(&table_name)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut region_to_datanode_mapping = HashMap::new();
|
||||
for region_route in table_route.region_routes.iter() {
|
||||
let region_id = region_route.region.id as u32;
|
||||
let datanode_id = region_route.leader_peer.as_ref().unwrap().id;
|
||||
region_to_datanode_mapping.insert(region_id, datanode_id);
|
||||
}
|
||||
|
||||
let mut global_start_ts = 1;
|
||||
let regional_numbers = vec![
|
||||
(0, (0..5).collect::<Vec<i32>>()),
|
||||
(1, (10..15).collect::<Vec<i32>>()),
|
||||
(2, (30..35).collect::<Vec<i32>>()),
|
||||
(3, (100..105).collect::<Vec<i32>>()),
|
||||
];
|
||||
for (region_number, numbers) in regional_numbers {
|
||||
let datanode_id = *region_to_datanode_mapping.get(®ion_number).unwrap();
|
||||
let instance = datanode_instances.get(&datanode_id).unwrap().clone();
|
||||
|
||||
let start_ts = global_start_ts;
|
||||
global_start_ts += numbers.len() as i64;
|
||||
|
||||
insert_testing_data(
|
||||
&table_name,
|
||||
instance.clone(),
|
||||
numbers,
|
||||
start_ts,
|
||||
region_number,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
let meta = TableMetaBuilder::default()
|
||||
.schema(schema)
|
||||
.primary_key_indices(vec![])
|
||||
.next_column_id(1)
|
||||
.build()
|
||||
.unwrap();
|
||||
let table_info = TableInfoBuilder::default()
|
||||
.name(&table_name.table_name)
|
||||
.meta(meta)
|
||||
.build()
|
||||
.unwrap();
|
||||
DistTable {
|
||||
table_name,
|
||||
table_info: Arc::new(table_info),
|
||||
partition_manager,
|
||||
datanode_clients,
|
||||
backend: catalog_manager.backend(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn insert_testing_data(
|
||||
table_name: &TableName,
|
||||
dn_instance: Arc<Instance>,
|
||||
data: Vec<i32>,
|
||||
start_ts: i64,
|
||||
region_number: RegionNumber,
|
||||
) {
|
||||
let row_count = data.len() as u32;
|
||||
let columns = vec![
|
||||
Column {
|
||||
column_name: "ts".to_string(),
|
||||
values: Some(column::Values {
|
||||
i64_values: (start_ts..start_ts + row_count as i64).collect::<Vec<i64>>(),
|
||||
..Default::default()
|
||||
}),
|
||||
datatype: ColumnDataType::Int64 as i32,
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
..Default::default()
|
||||
},
|
||||
Column {
|
||||
column_name: "a".to_string(),
|
||||
values: Some(column::Values {
|
||||
i32_values: data,
|
||||
..Default::default()
|
||||
}),
|
||||
datatype: ColumnDataType::Int32 as i32,
|
||||
..Default::default()
|
||||
},
|
||||
Column {
|
||||
column_name: "row_id".to_string(),
|
||||
values: Some(column::Values {
|
||||
i32_values: (1..=row_count as i32).collect::<Vec<i32>>(),
|
||||
..Default::default()
|
||||
}),
|
||||
datatype: ColumnDataType::Int32 as i32,
|
||||
..Default::default()
|
||||
},
|
||||
];
|
||||
let request = GrpcInsertRequest {
|
||||
table_name: table_name.table_name.clone(),
|
||||
columns,
|
||||
row_count,
|
||||
region_number,
|
||||
};
|
||||
dn_instance
|
||||
.handle_insert(request, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_find_regions() {
|
||||
let partition_manager = Arc::new(PartitionRuleManager::new(Arc::new(TableRoutes::new(
|
||||
|
||||
@@ -67,6 +67,19 @@ pub enum Error {
|
||||
source: raft_engine::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Cannot override compacted entry, namespace: {}, first index: {}, attempt index: {}",
|
||||
namespace,
|
||||
first_index,
|
||||
attempt_index
|
||||
))]
|
||||
OverrideCompactedEntry {
|
||||
namespace: u64,
|
||||
first_index: u64,
|
||||
attempt_index: u64,
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
|
||||
@@ -75,14 +75,16 @@ impl LogStore for NoopLogStore {
|
||||
_id: Id,
|
||||
) -> Result<store_api::logstore::entry_stream::SendableEntryStream<'_, Self::Entry, Self::Error>>
|
||||
{
|
||||
todo!()
|
||||
Ok(Box::pin(futures::stream::once(futures::future::ready(Ok(
|
||||
vec![],
|
||||
)))))
|
||||
}
|
||||
|
||||
async fn create_namespace(&mut self, _ns: &Self::Namespace) -> Result<()> {
|
||||
async fn create_namespace(&self, _ns: &Self::Namespace) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_namespace(&mut self, _ns: &Self::Namespace) -> Result<()> {
|
||||
async fn delete_namespace(&self, _ns: &Self::Namespace) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -126,7 +128,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_noop_logstore() {
|
||||
let mut store = NoopLogStore::default();
|
||||
let store = NoopLogStore::default();
|
||||
let e = store.entry("".as_bytes(), 1, NamespaceImpl::default());
|
||||
store.append(e.clone()).await.unwrap();
|
||||
store
|
||||
|
||||
@@ -26,6 +26,7 @@ use store_api::logstore::namespace::Namespace as NamespaceTrait;
|
||||
use store_api::logstore::{AppendResponse, LogStore};
|
||||
|
||||
use crate::config::LogConfig;
|
||||
use crate::error;
|
||||
use crate::error::{
|
||||
AddEntryLogBatchSnafu, Error, FetchEntrySnafu, IllegalNamespaceSnafu, IllegalStateSnafu,
|
||||
RaftEngineSnafu, StartGcTaskSnafu, StopGcTaskSnafu,
|
||||
@@ -107,6 +108,13 @@ impl RaftEngineLogStore {
|
||||
.start(common_runtime::bg_runtime())
|
||||
.context(StartGcTaskSnafu)
|
||||
}
|
||||
|
||||
fn span(&self, namespace: &<Self as LogStore>::Namespace) -> (Option<u64>, Option<u64>) {
|
||||
(
|
||||
self.engine.first_index(namespace.id()),
|
||||
self.engine.last_index(namespace.id()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for RaftEngineLogStore {
|
||||
@@ -132,11 +140,23 @@ impl LogStore for RaftEngineLogStore {
|
||||
async fn append(&self, e: Self::Entry) -> Result<AppendResponse, Self::Error> {
|
||||
ensure!(self.started(), IllegalStateSnafu);
|
||||
let entry_id = e.id;
|
||||
let namespace_id = e.namespace_id;
|
||||
let mut batch = LogBatch::with_capacity(1);
|
||||
batch
|
||||
.add_entries::<MessageType>(e.namespace_id, &[e])
|
||||
.add_entries::<MessageType>(namespace_id, &[e])
|
||||
.context(AddEntryLogBatchSnafu)?;
|
||||
|
||||
if let Some(first_index) = self.engine.first_index(namespace_id) {
|
||||
ensure!(
|
||||
entry_id >= first_index,
|
||||
error::OverrideCompactedEntrySnafu {
|
||||
namespace: namespace_id,
|
||||
first_index,
|
||||
attempt_index: entry_id,
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
self.engine
|
||||
.write(&mut batch, self.config.sync_write)
|
||||
.context(RaftEngineSnafu)?;
|
||||
@@ -151,11 +171,38 @@ impl LogStore for RaftEngineLogStore {
|
||||
entries: Vec<Self::Entry>,
|
||||
) -> Result<Vec<Id>, Self::Error> {
|
||||
ensure!(self.started(), IllegalStateSnafu);
|
||||
let entry_ids = entries.iter().map(Entry::get_id).collect::<Vec<_>>();
|
||||
if entries.is_empty() {
|
||||
return Ok(vec![]);
|
||||
}
|
||||
|
||||
let mut min_entry_id = u64::MAX;
|
||||
let entry_ids = entries
|
||||
.iter()
|
||||
.map(|e| {
|
||||
let id = e.get_id();
|
||||
if id < min_entry_id {
|
||||
min_entry_id = id;
|
||||
}
|
||||
id
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut batch = LogBatch::with_capacity(entries.len());
|
||||
batch
|
||||
.add_entries::<MessageType>(ns.id, &entries)
|
||||
.context(AddEntryLogBatchSnafu)?;
|
||||
|
||||
if let Some(first_index) = self.engine.first_index(ns.id) {
|
||||
ensure!(
|
||||
min_entry_id >= first_index,
|
||||
error::OverrideCompactedEntrySnafu {
|
||||
namespace: ns.id,
|
||||
first_index,
|
||||
attempt_index: min_entry_id,
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
self.engine
|
||||
.write(&mut batch, self.config.sync_write)
|
||||
.context(RaftEngineSnafu)?;
|
||||
@@ -175,6 +222,12 @@ impl LogStore for RaftEngineLogStore {
|
||||
let last_index = engine.last_index(ns.id).unwrap_or(0);
|
||||
let mut start_index = id.max(engine.first_index(ns.id).unwrap_or(last_index + 1));
|
||||
|
||||
info!(
|
||||
"Read logstore, namespace: {}, start: {}, span: {:?}",
|
||||
ns.id(),
|
||||
id,
|
||||
self.span(ns)
|
||||
);
|
||||
let max_batch_size = self.config.read_batch_size;
|
||||
let (tx, mut rx) = tokio::sync::mpsc::channel(max_batch_size);
|
||||
let ns = ns.clone();
|
||||
@@ -220,7 +273,7 @@ impl LogStore for RaftEngineLogStore {
|
||||
Ok(Box::pin(s))
|
||||
}
|
||||
|
||||
async fn create_namespace(&mut self, ns: &Self::Namespace) -> Result<(), Self::Error> {
|
||||
async fn create_namespace(&self, ns: &Self::Namespace) -> Result<(), Self::Error> {
|
||||
ensure!(
|
||||
ns.id != SYSTEM_NAMESPACE,
|
||||
IllegalNamespaceSnafu { ns: ns.id }
|
||||
@@ -237,7 +290,7 @@ impl LogStore for RaftEngineLogStore {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_namespace(&mut self, ns: &Self::Namespace) -> Result<(), Self::Error> {
|
||||
async fn delete_namespace(&self, ns: &Self::Namespace) -> Result<(), Self::Error> {
|
||||
ensure!(
|
||||
ns.id != SYSTEM_NAMESPACE,
|
||||
IllegalNamespaceSnafu { ns: ns.id }
|
||||
@@ -290,9 +343,11 @@ impl LogStore for RaftEngineLogStore {
|
||||
ensure!(self.started(), IllegalStateSnafu);
|
||||
let obsoleted = self.engine.compact_to(namespace.id(), id + 1);
|
||||
info!(
|
||||
"Namespace {} obsoleted {} entries",
|
||||
"Namespace {} obsoleted {} entries, compacted index: {}, span: {:?}",
|
||||
namespace.id(),
|
||||
obsoleted
|
||||
obsoleted,
|
||||
id,
|
||||
self.span(&namespace)
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
@@ -343,7 +398,7 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_manage_namespace() {
|
||||
let dir = create_temp_dir("raft-engine-logstore-test");
|
||||
let mut logstore = RaftEngineLogStore::try_new(LogConfig {
|
||||
let logstore = RaftEngineLogStore::try_new(LogConfig {
|
||||
log_file_dir: dir.path().to_str().unwrap().to_string(),
|
||||
..Default::default()
|
||||
})
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use api::v1::meta::Role;
|
||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
|
||||
use meta_client::client::{MetaClient, MetaClientBuilder};
|
||||
use meta_client::rpc::lock::{LockRequest, UnlockRequest};
|
||||
@@ -33,7 +34,7 @@ async fn run() {
|
||||
.connect_timeout(Duration::from_secs(5))
|
||||
.tcp_nodelay(true);
|
||||
let channel_manager = ChannelManager::with_config(config);
|
||||
let mut meta_client = MetaClientBuilder::new(id.0, id.1)
|
||||
let mut meta_client = MetaClientBuilder::new(id.0, id.1, Role::Datanode)
|
||||
.enable_lock()
|
||||
.channel_manager(channel_manager)
|
||||
.build();
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
use std::collections::HashMap;
|
||||
use std::time::Duration;
|
||||
|
||||
use api::v1::meta::{HeartbeatRequest, Peer};
|
||||
use api::v1::meta::{HeartbeatRequest, Peer, Role};
|
||||
use chrono::DateTime;
|
||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
@@ -43,7 +43,7 @@ async fn run() {
|
||||
.connect_timeout(Duration::from_secs(5))
|
||||
.tcp_nodelay(true);
|
||||
let channel_manager = ChannelManager::with_config(config);
|
||||
let mut meta_client = MetaClientBuilder::new(id.0, id.1)
|
||||
let mut meta_client = MetaClientBuilder::new(id.0, id.1, Role::Datanode)
|
||||
.enable_heartbeat()
|
||||
.enable_router()
|
||||
.enable_store()
|
||||
|
||||
@@ -18,6 +18,7 @@ mod lock;
|
||||
mod router;
|
||||
mod store;
|
||||
|
||||
use api::v1::meta::Role;
|
||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
|
||||
use common_telemetry::info;
|
||||
use heartbeat::Client as HeartbeatClient;
|
||||
@@ -43,6 +44,7 @@ pub type Id = (u64, u64);
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct MetaClientBuilder {
|
||||
id: Id,
|
||||
role: Role,
|
||||
enable_heartbeat: bool,
|
||||
enable_router: bool,
|
||||
enable_store: bool,
|
||||
@@ -51,9 +53,10 @@ pub struct MetaClientBuilder {
|
||||
}
|
||||
|
||||
impl MetaClientBuilder {
|
||||
pub fn new(cluster_id: u64, member_id: u64) -> Self {
|
||||
pub fn new(cluster_id: u64, member_id: u64, role: Role) -> Self {
|
||||
Self {
|
||||
id: (cluster_id, member_id),
|
||||
role,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
@@ -107,16 +110,16 @@ impl MetaClientBuilder {
|
||||
let mgr = client.channel_manager.clone();
|
||||
|
||||
if self.enable_heartbeat {
|
||||
client.heartbeat = Some(HeartbeatClient::new(self.id, mgr.clone()));
|
||||
client.heartbeat = Some(HeartbeatClient::new(self.id, self.role, mgr.clone()));
|
||||
}
|
||||
if self.enable_router {
|
||||
client.router = Some(RouterClient::new(self.id, mgr.clone()));
|
||||
client.router = Some(RouterClient::new(self.id, self.role, mgr.clone()));
|
||||
}
|
||||
if self.enable_store {
|
||||
client.store = Some(StoreClient::new(self.id, mgr.clone()));
|
||||
client.store = Some(StoreClient::new(self.id, self.role, mgr.clone()));
|
||||
}
|
||||
if self.enable_lock {
|
||||
client.lock = Some(LockClient::new(self.id, mgr));
|
||||
client.lock = Some(LockClient::new(self.id, self.role, mgr));
|
||||
}
|
||||
|
||||
client
|
||||
@@ -409,28 +412,34 @@ mod tests {
|
||||
async fn test_meta_client_builder() {
|
||||
let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
|
||||
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0).enable_heartbeat().build();
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
|
||||
.enable_heartbeat()
|
||||
.build();
|
||||
assert!(meta_client.heartbeat_client().is_ok());
|
||||
assert!(meta_client.router_client().is_err());
|
||||
assert!(meta_client.store_client().is_err());
|
||||
meta_client.start(urls).await.unwrap();
|
||||
assert!(meta_client.heartbeat_client().unwrap().is_started().await);
|
||||
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0).enable_router().build();
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
|
||||
.enable_router()
|
||||
.build();
|
||||
assert!(meta_client.heartbeat_client().is_err());
|
||||
assert!(meta_client.router_client().is_ok());
|
||||
assert!(meta_client.store_client().is_err());
|
||||
meta_client.start(urls).await.unwrap();
|
||||
assert!(meta_client.router_client().unwrap().is_started().await);
|
||||
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0).enable_store().build();
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
|
||||
.enable_store()
|
||||
.build();
|
||||
assert!(meta_client.heartbeat_client().is_err());
|
||||
assert!(meta_client.router_client().is_err());
|
||||
assert!(meta_client.store_client().is_ok());
|
||||
meta_client.start(urls).await.unwrap();
|
||||
assert!(meta_client.store_client().unwrap().is_started().await);
|
||||
|
||||
let mut meta_client = MetaClientBuilder::new(1, 2)
|
||||
let mut meta_client = MetaClientBuilder::new(1, 2, Role::Datanode)
|
||||
.enable_heartbeat()
|
||||
.enable_router()
|
||||
.enable_store()
|
||||
@@ -449,7 +458,7 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_not_start_heartbeat_client() {
|
||||
let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0)
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
|
||||
.enable_router()
|
||||
.enable_store()
|
||||
.build();
|
||||
@@ -494,7 +503,7 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_not_start_router_client() {
|
||||
let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0)
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
|
||||
.enable_heartbeat()
|
||||
.enable_store()
|
||||
.build();
|
||||
@@ -509,7 +518,7 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_not_start_store_client() {
|
||||
let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0)
|
||||
let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
|
||||
.enable_heartbeat()
|
||||
.enable_router()
|
||||
.build();
|
||||
@@ -522,7 +531,7 @@ mod tests {
|
||||
#[should_panic]
|
||||
#[test]
|
||||
fn test_failed_when_start_nothing() {
|
||||
let _ = MetaClientBuilder::new(0, 0).build();
|
||||
let _ = MetaClientBuilder::new(0, 0, Role::Datanode).build();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
@@ -16,7 +16,7 @@ use std::collections::HashSet;
use std::sync::Arc;

use api::v1::meta::heartbeat_client::HeartbeatClient;
use api::v1::meta::{AskLeaderRequest, HeartbeatRequest, HeartbeatResponse, RequestHeader};
use api::v1::meta::{AskLeaderRequest, HeartbeatRequest, HeartbeatResponse, RequestHeader, Role};
use common_grpc::channel_manager::ChannelManager;
use common_telemetry::{debug, info};
use snafu::{ensure, OptionExt, ResultExt};
@@ -32,13 +32,14 @@ use crate::rpc::util;

pub struct HeartbeatSender {
id: Id,
role: Role,
sender: mpsc::Sender<HeartbeatRequest>,
}

impl HeartbeatSender {
#[inline]
fn new(id: Id, sender: mpsc::Sender<HeartbeatRequest>) -> Self {
Self { id, sender }
fn new(id: Id, role: Role, sender: mpsc::Sender<HeartbeatRequest>) -> Self {
Self { id, role, sender }
}

#[inline]
@@ -48,7 +49,7 @@ impl HeartbeatSender {

#[inline]
pub async fn send(&self, mut req: HeartbeatRequest) -> Result<()> {
req.set_header(self.id);
req.set_header(self.id, self.role);
self.sender.send(req).await.map_err(|e| {
error::SendHeartbeatSnafu {
err_msg: e.to_string(),
@@ -92,9 +93,10 @@ pub struct Client {
}

impl Client {
pub fn new(id: Id, channel_manager: ChannelManager) -> Self {
pub fn new(id: Id, role: Role, channel_manager: ChannelManager) -> Self {
let inner = Arc::new(RwLock::new(Inner {
id,
role,
channel_manager,
peers: HashSet::default(),
leader: None,
@@ -132,6 +134,7 @@ impl Client {
#[derive(Debug)]
struct Inner {
id: Id,
role: Role,
channel_manager: ChannelManager,
peers: HashSet<String>,
leader: Option<String>,
@@ -167,7 +170,7 @@ impl Inner {
}
);

let header = RequestHeader::new(self.id);
let header = RequestHeader::new(self.id, self.role);
let mut leader = None;
for addr in &self.peers {
let req = AskLeaderRequest {
@@ -195,8 +198,10 @@ impl Inner {
let mut leader = self.make_client(leader)?;

let (sender, receiver) = mpsc::channel::<HeartbeatRequest>(128);

let header = RequestHeader::new(self.id, self.role);
let handshake = HeartbeatRequest {
header: Some(RequestHeader::new(self.id)),
header: Some(header),
..Default::default()
};
sender.send(handshake).await.map_err(|e| {
@@ -221,7 +226,7 @@ impl Inner {
info!("Success to create heartbeat stream to server: {:#?}", res);

Ok((
HeartbeatSender::new(self.id, sender),
HeartbeatSender::new(self.id, self.role, sender),
HeartbeatStream::new(self.id, stream),
))
}
@@ -247,7 +252,7 @@ mod test {

#[tokio::test]
async fn test_start_client() {
let mut client = Client::new((0, 0), ChannelManager::default());
let mut client = Client::new((0, 0), Role::Datanode, ChannelManager::default());
assert!(!client.is_started().await);
client
.start(&["127.0.0.1:1000", "127.0.0.1:1001"])
@@ -258,7 +263,7 @@ mod test {

#[tokio::test]
async fn test_already_start() {
let mut client = Client::new((0, 0), ChannelManager::default());
let mut client = Client::new((0, 0), Role::Datanode, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1001"])
.await
@@ -274,7 +279,7 @@ mod test {

#[tokio::test]
async fn test_start_with_duplicate_peers() {
let mut client = Client::new((0, 0), ChannelManager::default());
let mut client = Client::new((0, 0), Role::Datanode, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1000", "127.0.0.1:1000"])
.await
@@ -285,7 +290,7 @@ mod test {
#[tokio::test]
async fn test_heartbeat_stream() {
let (sender, mut receiver) = mpsc::channel::<HeartbeatRequest>(100);
let sender = HeartbeatSender::new((8, 8), sender);
let sender = HeartbeatSender::new((8, 8), Role::Datanode, sender);
tokio::spawn(async move {
for _ in 0..10 {
sender.send(HeartbeatRequest::default()).await.unwrap();

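The net effect of this file is that the Role chosen at construction time rides along with every heartbeat: it is stored on HeartbeatSender and Inner, written into each RequestHeader, and used for the initial handshake. A small sketch of the send path, mirroring test_heartbeat_stream above and assuming HeartbeatRequest.header is the usual Option<RequestHeader> prost field; the ids and channel capacity are illustrative:

    use api::v1::meta::{HeartbeatRequest, Role};
    use tokio::sync::mpsc;

    // Every send() stamps the request header with the (cluster_id, member_id)
    // pair and the client's Role before the request reaches the channel.
    let (tx, mut rx) = mpsc::channel::<HeartbeatRequest>(16);
    let sender = HeartbeatSender::new((8, 8), Role::Datanode, tx);
    sender.send(HeartbeatRequest::default()).await.unwrap();
    let req = rx.recv().await.unwrap();
    assert_eq!(req.header.unwrap().cluster_id, 8);
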
@@ -16,7 +16,7 @@ use std::collections::HashSet;
use std::sync::Arc;

use api::v1::meta::lock_client::LockClient;
use api::v1::meta::{LockRequest, LockResponse, UnlockRequest, UnlockResponse};
use api::v1::meta::{LockRequest, LockResponse, Role, UnlockRequest, UnlockResponse};
use common_grpc::channel_manager::ChannelManager;
use snafu::{ensure, OptionExt, ResultExt};
use tokio::sync::RwLock;
@@ -32,9 +32,10 @@ pub struct Client {
}

impl Client {
pub fn new(id: Id, channel_manager: ChannelManager) -> Self {
pub fn new(id: Id, role: Role, channel_manager: ChannelManager) -> Self {
let inner = Arc::new(RwLock::new(Inner {
id,
role,
channel_manager,
peers: vec![],
}));
@@ -70,6 +71,7 @@ impl Client {
#[derive(Debug)]
struct Inner {
id: Id,
role: Role,
channel_manager: ChannelManager,
peers: Vec<String>,
}
@@ -125,7 +127,7 @@ impl Inner {

async fn lock(&self, mut req: LockRequest) -> Result<LockResponse> {
let mut client = self.random_client()?;
req.set_header(self.id);
req.set_header(self.id, self.role);
let res = client.lock(req).await.context(error::TonicStatusSnafu)?;

Ok(res.into_inner())
@@ -133,7 +135,7 @@ impl Inner {

async fn unlock(&self, mut req: UnlockRequest) -> Result<UnlockResponse> {
let mut client = self.random_client()?;
req.set_header(self.id);
req.set_header(self.id, self.role);
let res = client.unlock(req).await.context(error::TonicStatusSnafu)?;

Ok(res.into_inner())
@@ -146,7 +148,7 @@ mod tests {

#[tokio::test]
async fn test_start_client() {
let mut client = Client::new((0, 0), ChannelManager::default());
let mut client = Client::new((0, 0), Role::Datanode, ChannelManager::default());
assert!(!client.is_started().await);
client
.start(&["127.0.0.1:1000", "127.0.0.1:1001"])
@@ -157,7 +159,7 @@ mod tests {

#[tokio::test]
async fn test_already_start() {
let mut client = Client::new((0, 0), ChannelManager::default());
let mut client = Client::new((0, 0), Role::Datanode, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1001"])
.await
@@ -173,7 +175,7 @@ mod tests {

#[tokio::test]
async fn test_start_with_duplicate_peers() {
let mut client = Client::new((0, 0), ChannelManager::default());
let mut client = Client::new((0, 0), Role::Datanode, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1000", "127.0.0.1:1000"])
.await

@@ -16,7 +16,7 @@ use std::collections::HashSet;
use std::sync::Arc;

use api::v1::meta::router_client::RouterClient;
use api::v1::meta::{CreateRequest, DeleteRequest, RouteRequest, RouteResponse};
use api::v1::meta::{CreateRequest, DeleteRequest, Role, RouteRequest, RouteResponse};
use common_grpc::channel_manager::ChannelManager;
use snafu::{ensure, OptionExt, ResultExt};
use tokio::sync::RwLock;
@@ -32,9 +32,10 @@ pub struct Client {
}

impl Client {
pub fn new(id: Id, channel_manager: ChannelManager) -> Self {
pub fn new(id: Id, role: Role, channel_manager: ChannelManager) -> Self {
let inner = Arc::new(RwLock::new(Inner {
id,
role,
channel_manager,
peers: vec![],
}));
@@ -75,6 +76,7 @@ impl Client {
#[derive(Debug)]
struct Inner {
id: Id,
role: Role,
channel_manager: ChannelManager,
peers: Vec<String>,
}
@@ -105,7 +107,7 @@ impl Inner {

async fn create(&self, mut req: CreateRequest) -> Result<RouteResponse> {
let mut client = self.random_client()?;
req.set_header(self.id);
req.set_header(self.id, self.role);
let res = client.create(req).await.context(error::TonicStatusSnafu)?;

Ok(res.into_inner())
@@ -113,7 +115,7 @@ impl Inner {

async fn route(&self, mut req: RouteRequest) -> Result<RouteResponse> {
let mut client = self.random_client()?;
req.set_header(self.id);
req.set_header(self.id, self.role);
let res = client.route(req).await.context(error::TonicStatusSnafu)?;

Ok(res.into_inner())
@@ -121,7 +123,7 @@ impl Inner {

async fn delete(&self, mut req: DeleteRequest) -> Result<RouteResponse> {
let mut client = self.random_client()?;
req.set_header(self.id);
req.set_header(self.id, self.role);
let res = client.delete(req).await.context(error::TonicStatusSnafu)?;

Ok(res.into_inner())
@@ -159,7 +161,7 @@ mod test {

#[tokio::test]
async fn test_start_client() {
let mut client = Client::new((0, 0), ChannelManager::default());
let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default());
assert!(!client.is_started().await);
client
.start(&["127.0.0.1:1000", "127.0.0.1:1001"])
@@ -170,7 +172,7 @@ mod test {

#[tokio::test]
async fn test_already_start() {
let mut client = Client::new((0, 0), ChannelManager::default());
let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1001"])
.await
@@ -186,7 +188,7 @@ mod test {

#[tokio::test]
async fn test_start_with_duplicate_peers() {
let mut client = Client::new((0, 0), ChannelManager::default());
let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1000", "127.0.0.1:1000"])
.await

@@ -20,7 +20,7 @@ use api::v1::meta::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
RangeRequest, RangeResponse,
RangeRequest, RangeResponse, Role,
};
use common_grpc::channel_manager::ChannelManager;
use snafu::{ensure, OptionExt, ResultExt};
@@ -37,9 +37,10 @@ pub struct Client {
}

impl Client {
pub fn new(id: Id, channel_manager: ChannelManager) -> Self {
pub fn new(id: Id, role: Role, channel_manager: ChannelManager) -> Self {
let inner = Arc::new(RwLock::new(Inner {
id,
role,
channel_manager,
peers: vec![],
}));
@@ -108,6 +109,7 @@ impl Client {
#[derive(Debug)]
struct Inner {
id: Id,
role: Role,
channel_manager: ChannelManager,
peers: Vec<String>,
}
@@ -138,7 +140,7 @@ impl Inner {

async fn range(&self, mut req: RangeRequest) -> Result<RangeResponse> {
let mut client = self.random_client()?;
req.set_header(self.id);
req.set_header(self.id, self.role);
let res = client.range(req).await.context(error::TonicStatusSnafu)?;

Ok(res.into_inner())
@@ -146,7 +148,7 @@ impl Inner {

async fn put(&self, mut req: PutRequest) -> Result<PutResponse> {
let mut client = self.random_client()?;
req.set_header(self.id);
req.set_header(self.id, self.role);
let res = client.put(req).await.context(error::TonicStatusSnafu)?;

Ok(res.into_inner())
@@ -154,7 +156,7 @@ impl Inner {

async fn batch_get(&self, mut req: BatchGetRequest) -> Result<BatchGetResponse> {
let mut client = self.random_client()?;
req.set_header(self.id);
req.set_header(self.id, self.role);

let res = client
.batch_get(req)
@@ -166,7 +168,7 @@ impl Inner {

async fn batch_put(&self, mut req: BatchPutRequest) -> Result<BatchPutResponse> {
let mut client = self.random_client()?;
req.set_header(self.id);
req.set_header(self.id, self.role);
let res = client
.batch_put(req)
.await
@@ -177,7 +179,7 @@ impl Inner {

async fn batch_delete(&self, mut req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
let mut client = self.random_client()?;
req.set_header(self.id);
req.set_header(self.id, self.role);
let res = client
.batch_delete(req)
.await
@@ -191,7 +193,7 @@ impl Inner {
mut req: CompareAndPutRequest,
) -> Result<CompareAndPutResponse> {
let mut client = self.random_client()?;
req.set_header(self.id);
req.set_header(self.id, self.role);
let res = client
.compare_and_put(req)
.await
@@ -202,7 +204,7 @@ impl Inner {

async fn delete_range(&self, mut req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
let mut client = self.random_client()?;
req.set_header(self.id);
req.set_header(self.id, self.role);
let res = client
.delete_range(req)
.await
@@ -213,7 +215,7 @@ impl Inner {

async fn move_value(&self, mut req: MoveValueRequest) -> Result<MoveValueResponse> {
let mut client = self.random_client()?;
req.set_header(self.id);
req.set_header(self.id, self.role);
let res = client
.move_value(req)
.await
@@ -254,7 +256,7 @@ mod test {

#[tokio::test]
async fn test_start_client() {
let mut client = Client::new((0, 0), ChannelManager::default());
let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default());
assert!(!client.is_started().await);
client
.start(&["127.0.0.1:1000", "127.0.0.1:1001"])
@@ -265,7 +267,7 @@ mod test {

#[tokio::test]
async fn test_already_start() {
let mut client = Client::new((0, 0), ChannelManager::default());
let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1001"])
.await
@@ -281,7 +283,7 @@ mod test {

#[tokio::test]
async fn test_start_with_duplicate_peers() {
let mut client = Client::new((0, 0), ChannelManager::default());
let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1000", "127.0.0.1:1000"])
.await

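The lock, router, and store clients all repeat the same req.set_header(self.id, self.role) call before every RPC, so the role reaches the meta server on each request, not only at connection setup. The set_header extension itself lives in the api crate and is not part of this diff; a sketch of what it plausibly builds, with the RequestHeader field names assumed:

    // Assumed shape of the header that set_header installs; cluster_id,
    // member_id, and role are best guesses at the proto field names.
    fn make_header(id: Id, role: Role) -> RequestHeader {
        RequestHeader {
            cluster_id: id.0,
            member_id: id.1,
            role: role as i32, // prost carries enum fields as i32
            ..Default::default()
        }
    }
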
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::meta::Role;
use meta_srv::metasrv::SelectorRef;
use meta_srv::mocks as server_mock;
use meta_srv::mocks::MockInfo;
@@ -41,7 +42,7 @@ pub async fn mock_client_by(mock_info: MockInfo) -> MetaClient {
} = mock_info;

let id = (1000u64, 2000u64);
let mut meta_client = MetaClientBuilder::new(id.0, id.1)
let mut meta_client = MetaClientBuilder::new(id.0, id.1, Role::Datanode)
.enable_heartbeat()
.enable_router()
.enable_store()

@@ -17,6 +17,7 @@ common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-meta = { path = "../common/meta" }
common-procedure = { path = "../common/procedure" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
@@ -29,6 +30,7 @@ h2 = "0.3"
http-body = "0.4"
lazy_static = "1.4"
metrics.workspace = true
once_cell = "1.17"
parking_lot = "0.12"
prost.workspace = true
rand.workspace = true

@@ -154,6 +154,14 @@ pub enum Error {
#[snafu(display("Failed to get sequence: {}", err_msg))]
NextSequence { err_msg: String, location: Location },

#[snafu(display("Sequence out of range: {}, start={}, step={}", name, start, step))]
SequenceOutOfRange {
name: String,
start: u64,
step: u64,
location: Location,
},

#[snafu(display("MetaSrv has no leader at this moment"))]
NoLeader { location: Location },

@@ -313,6 +321,9 @@ pub enum Error {
err_msg: String,
location: Location,
},

#[snafu(display("Missing request header"))]
MissingRequestHeader { location: Location },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -362,6 +373,7 @@ impl ErrorExt for Error {
| Error::StartGrpc { .. } => StatusCode::Internal,
Error::EmptyKey { .. }
| Error::MissingRequiredParameter { .. }
| Error::MissingRequestHeader { .. }
| Error::EmptyTableName { .. }
| Error::InvalidLeaseKey { .. }
| Error::InvalidStatKey { .. }
@@ -375,6 +387,7 @@ impl ErrorExt for Error {
| Error::UnexceptedSequenceValue { .. }
| Error::TableRouteNotFound { .. }
| Error::NextSequence { .. }
| Error::SequenceOutOfRange { .. }
| Error::MoveValue { .. }
| Error::InvalidKvsLength { .. }
| Error::InvalidTxnResult { .. }

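The new MissingRequestHeader variant is grouped with the invalid-argument style errors (EmptyKey, MissingRequiredParameter, and so on). A hypothetical guard at a gRPC entry point, assuming the request header is the usual Option<RequestHeader> field; the actual handlers are not shown in this diff, and snafu derives the MissingRequestHeaderSnafu selector from the variant above:

    use snafu::OptionExt;

    // Hypothetical call site: reject requests that arrive without a header
    // before any role- or id-dependent work is done.
    fn required_header(req: &HeartbeatRequest) -> Result<&RequestHeader> {
        req.header
            .as_ref()
            .context(error::MissingRequestHeaderSnafu)
    }
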
@@ -22,6 +22,7 @@ use api::v1::meta::{
};
pub use check_leader_handler::CheckLeaderHandler;
pub use collect_stats_handler::CollectStatsHandler;
use common_meta::instruction::Instruction;
use common_telemetry::{info, warn};
use dashmap::DashMap;
pub use failure_handler::RegionFailureHandler;
@@ -34,18 +35,15 @@ use snafu::OptionExt;
use tokio::sync::mpsc::Sender;
use tokio::sync::{oneshot, Notify, RwLock};

use self::instruction::Instruction;
use self::node_stat::Stat;
use crate::error::{self, Result};
use crate::metasrv::Context;
use crate::metrics::METRIC_META_HEARTBEAT_CONNECTION_NUM;
use crate::sequence::Sequence;
use crate::service::mailbox::{Channel, Mailbox, MailboxReceiver, MailboxRef, MessageId};

mod check_leader_handler;
mod collect_stats_handler;
mod failure_handler;
mod instruction;
mod keep_lease_handler;
pub mod mailbox_handler;
pub mod node_stat;
@@ -73,9 +71,9 @@ pub struct HeartbeatAccumulator {
}

impl HeartbeatAccumulator {
pub fn into_mailbox_messages(self) -> Vec<MailboxMessage> {
pub fn into_mailbox_message(self) -> Option<MailboxMessage> {
// TODO(jiachun): to HeartbeatResponse payload
vec![]
None
}
}

@@ -87,11 +85,11 @@ pub struct Pusher {
impl Pusher {
pub fn new(
sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
req_header: &Option<RequestHeader>,
req_header: &RequestHeader,
) -> Self {
let res_header = ResponseHeader {
protocol_version: PROTOCOL_VERSION,
cluster_id: req_header.as_ref().map_or(0, |h| h.cluster_id),
cluster_id: req_header.cluster_id,
..Default::default()
};

@@ -173,7 +171,7 @@ impl HeartbeatHandlerGroup {
let header = std::mem::take(&mut acc.header);
let res = HeartbeatResponse {
header,
mailbox_messages: acc.into_mailbox_messages(),
mailbox_message: acc.into_mailbox_message(),
};
Ok(res)
}
@@ -243,6 +241,18 @@ impl HeartbeatMailbox {
}
}
}

#[inline]
async fn next_message_id(&self) -> Result<u64> {
// In this implementation, we pre-occupy the message_id of 0,
// and we use `message_id = 0` to mark a Message as a one-way call.
loop {
let next = self.sequence.next().await?;
if next > 0 {
return Ok(next);
}
}
}
}

#[async_trait::async_trait]
@@ -253,7 +263,7 @@ impl Mailbox for HeartbeatMailbox {
mut msg: MailboxMessage,
timeout: Duration,
) -> Result<MailboxReceiver> {
let message_id = self.sequence.next().await?;
let message_id = self.next_message_id().await?;

let pusher_id = match ch {
Channel::Datanode(id) => format!("{}-{}", Role::Datanode as i32, id),
@@ -275,7 +285,7 @@ impl Mailbox for HeartbeatMailbox {
msg.id = message_id;
let res = HeartbeatResponse {
header: Some(header),
mailbox_messages: vec![msg],
mailbox_message: Some(msg),
};

pusher.push(res).await?;
@@ -290,15 +300,7 @@ impl Mailbox for HeartbeatMailbox {
tx.send(maybe_msg)
.map_err(|_| error::MailboxClosedSnafu { id }.build())?;
} else if let Ok(finally_msg) = maybe_msg {
let MailboxMessage {
id,
subject,
from,
to,
timestamp_millis,
..
} = finally_msg;
warn!("The response arrived too late, id={id}, subject={subject}, from={from}, to={to}, timestamp={timestamp_millis}");
warn!("The response arrived too late: {finally_msg:?}");
}

Ok(())
@@ -352,7 +354,7 @@ mod tests {
protocol_version: PROTOCOL_VERSION,
..Default::default()
};
let pusher: Pusher = Pusher::new(pusher_tx, &Option::from(res_header));
let pusher: Pusher = Pusher::new(pusher_tx, &res_header);
let handler_group = HeartbeatHandlerGroup::default();
handler_group
.register(format!("{}-{}", Role::Datanode as i32, datanode_id), pusher)
@@ -376,8 +378,9 @@ mod tests {
.unwrap();

let recv_obj = pusher_rx.recv().await.unwrap().unwrap();
assert_eq!(recv_obj.mailbox_messages[0].timestamp_millis, 123);
assert_eq!(recv_obj.mailbox_messages[0].subject, "req-test".to_string());
let message = recv_obj.mailbox_message.unwrap();
assert_eq!(message.timestamp_millis, 123);
assert_eq!(message.subject, "req-test".to_string());

(mailbox, receiver)
}

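Reserving message_id 0 lets the mailbox tell one-way notifications apart from request/response calls without an extra flag. A sketch of how a receiver could apply that convention; no such helper appears in this diff, so the function below is hypothetical:

    // Hypothetical convenience based on the next_message_id() comment above:
    // id == 0 marks a fire-and-forget message, any other id expects a reply
    // routed back through the mailbox under that id.
    fn is_one_way(msg: &MailboxMessage) -> bool {
        msg.id == 0
    }
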
Some files were not shown because too many files have changed in this diff.